mirror of
https://github.com/jpawlowski/hass.tibber_prices.git
synced 2026-03-30 05:13:40 +00:00
Compare commits
74 commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
994eecdd3d | ||
|
|
4d822030f9 | ||
|
|
b92becdf8f | ||
|
|
566ccf4017 | ||
|
|
0381749e6f | ||
|
|
00a653396c | ||
|
|
dbe73452f7 | ||
|
|
9123903b7f | ||
|
|
5cab2a37b0 | ||
|
|
e796660112 | ||
|
|
719344e11f | ||
|
|
a59096eeff | ||
|
|
afd626af05 | ||
|
|
e429dcf945 | ||
|
|
86c28acead | ||
|
|
92520051e4 | ||
|
|
ee7fc623a7 | ||
|
|
da64cc4805 | ||
|
|
981089fe68 | ||
|
|
d3f3975204 | ||
|
|
49cdb2c28a | ||
|
|
73b7f0b2ca | ||
|
|
152f104ef0 | ||
|
|
72b42460a0 | ||
|
|
1bf031ba19 | ||
|
|
89880c7755 | ||
|
|
631cebeb55 | ||
|
|
cc75bc53ee | ||
|
|
b541f7b15e | ||
|
|
2f36c73c18 | ||
|
|
1b22ce3f2a | ||
|
|
5fc1f4db33 | ||
|
|
972cbce1d3 | ||
|
|
f88d6738e6 | ||
|
|
4b32568665 | ||
|
|
4ceff6cf5f | ||
|
|
285258c325 | ||
|
|
3e6bcf2345 | ||
|
|
0a4af0de2f | ||
|
|
09a50dccff | ||
|
|
665fac10fc | ||
|
|
c6b34984fa | ||
|
|
3624f1c9a8 | ||
|
|
3968dba9d2 | ||
|
|
3157c6f0df | ||
|
|
e851cb0670 | ||
|
|
15e09fa210 | ||
|
|
c6d6e4a5b2 | ||
|
|
23b4330b9a | ||
|
|
81ebfb4916 | ||
|
|
a437d22b7a | ||
|
|
9eea984d1f | ||
|
|
9b34d416bc | ||
|
|
cfc7cf6abc | ||
|
|
78df8a4b17 | ||
|
|
7adc56bf79 | ||
|
|
94615dc6cd | ||
|
|
fc64aecdd9 | ||
|
|
db0de2376b | ||
|
|
4971ab92d6 | ||
|
|
49b8a018e7 | ||
|
|
4158e7b1fd | ||
|
|
5ef0396c8b | ||
|
|
7ee013daf2 | ||
|
|
325d855997 | ||
|
|
70552459ce | ||
|
|
11d4cbfd09 | ||
|
|
f57997b119 | ||
|
|
64cf842719 | ||
|
|
ba032a1c94 | ||
|
|
ced9d8656b | ||
|
|
941f903a9c | ||
|
|
ada17f6d90 | ||
|
|
5cc71901b9 |
199 changed files with 35461 additions and 3654 deletions
|
|
@ -1,10 +1,11 @@
|
||||||
{
|
{
|
||||||
"name": "jpawlowski/hass.tibber_prices",
|
"name": "jpawlowski/hass.tibber_prices",
|
||||||
"image": "mcr.microsoft.com/devcontainers/python:3.13",
|
"image": "mcr.microsoft.com/devcontainers/python:3.14",
|
||||||
"postCreateCommand": "bash .devcontainer/setup-git.sh && scripts/setup/setup",
|
"postCreateCommand": "bash .devcontainer/setup-git.sh && scripts/setup/setup",
|
||||||
"postStartCommand": "scripts/motd",
|
"postStartCommand": "scripts/motd",
|
||||||
"containerEnv": {
|
"containerEnv": {
|
||||||
"PYTHONASYNCIODEBUG": "1"
|
"PYTHONASYNCIODEBUG": "1",
|
||||||
|
"TIBBER_PRICES_DEV": "1"
|
||||||
},
|
},
|
||||||
"forwardPorts": [
|
"forwardPorts": [
|
||||||
8123,
|
8123,
|
||||||
|
|
@ -69,7 +70,7 @@
|
||||||
],
|
],
|
||||||
"python.defaultInterpreterPath": "${workspaceFolder}/.venv/bin/python",
|
"python.defaultInterpreterPath": "${workspaceFolder}/.venv/bin/python",
|
||||||
"python.analysis.extraPaths": [
|
"python.analysis.extraPaths": [
|
||||||
"${workspaceFolder}/.venv/lib/python3.13/site-packages"
|
"${workspaceFolder}/.venv/lib/python3.14/site-packages"
|
||||||
],
|
],
|
||||||
"python.terminal.activateEnvironment": true,
|
"python.terminal.activateEnvironment": true,
|
||||||
"python.terminal.activateEnvInCurrentTerminal": true,
|
"python.terminal.activateEnvInCurrentTerminal": true,
|
||||||
|
|
|
||||||
6
.github/workflows/auto-tag.yml
vendored
6
.github/workflows/auto-tag.yml
vendored
|
|
@ -43,13 +43,13 @@ jobs:
|
||||||
echo "✗ Tag v${{ steps.manifest.outputs.version }} does not exist yet"
|
echo "✗ Tag v${{ steps.manifest.outputs.version }} does not exist yet"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
- name: Validate version format
|
- name: Validate version format (stable or beta)
|
||||||
if: steps.tag_check.outputs.exists == 'false'
|
if: steps.tag_check.outputs.exists == 'false'
|
||||||
run: |
|
run: |
|
||||||
VERSION="${{ steps.manifest.outputs.version }}"
|
VERSION="${{ steps.manifest.outputs.version }}"
|
||||||
if ! echo "$VERSION" | grep -qE '^[0-9]+\.[0-9]+\.[0-9]+$'; then
|
if ! echo "$VERSION" | grep -qE '^[0-9]+\.[0-9]+\.[0-9]+(b[0-9]+)?$'; then
|
||||||
echo "❌ Invalid version format: $VERSION"
|
echo "❌ Invalid version format: $VERSION"
|
||||||
echo "Expected format: X.Y.Z (e.g., 1.0.0)"
|
echo "Expected format: X.Y.Z or X.Y.ZbN (e.g., 1.0.0, 0.25.0b0)"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
echo "✓ Version format valid: $VERSION"
|
echo "✓ Version format valid: $VERSION"
|
||||||
|
|
|
||||||
25
.github/workflows/docusaurus.yml
vendored
25
.github/workflows/docusaurus.yml
vendored
|
|
@ -33,6 +33,17 @@ jobs:
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0 # Needed for version timestamps
|
fetch-depth: 0 # Needed for version timestamps
|
||||||
|
|
||||||
|
- name: Detect prerelease tag (beta/rc)
|
||||||
|
id: taginfo
|
||||||
|
run: |
|
||||||
|
if [[ "${GITHUB_REF}" =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+(b[0-9]+|rc[0-9]+)$ ]]; then
|
||||||
|
echo "is_prerelease=true" >> "$GITHUB_OUTPUT"
|
||||||
|
echo "Detected prerelease tag: ${GITHUB_REF}"
|
||||||
|
else
|
||||||
|
echo "is_prerelease=false" >> "$GITHUB_OUTPUT"
|
||||||
|
echo "Stable tag or branch: ${GITHUB_REF}"
|
||||||
|
fi
|
||||||
|
|
||||||
- uses: actions/setup-node@v6
|
- uses: actions/setup-node@v6
|
||||||
with:
|
with:
|
||||||
node-version: 24
|
node-version: 24
|
||||||
|
|
@ -47,7 +58,7 @@ jobs:
|
||||||
run: npm ci
|
run: npm ci
|
||||||
|
|
||||||
- name: Create user docs version snapshot on tag
|
- name: Create user docs version snapshot on tag
|
||||||
if: startsWith(github.ref, 'refs/tags/v')
|
if: startsWith(github.ref, 'refs/tags/v') && steps.taginfo.outputs.is_prerelease != 'true'
|
||||||
working-directory: docs/user
|
working-directory: docs/user
|
||||||
run: |
|
run: |
|
||||||
TAG_VERSION=${GITHUB_REF#refs/tags/}
|
TAG_VERSION=${GITHUB_REF#refs/tags/}
|
||||||
|
|
@ -61,7 +72,7 @@ jobs:
|
||||||
fi
|
fi
|
||||||
|
|
||||||
- name: Cleanup old user docs versions
|
- name: Cleanup old user docs versions
|
||||||
if: startsWith(github.ref, 'refs/tags/v')
|
if: startsWith(github.ref, 'refs/tags/v') && steps.taginfo.outputs.is_prerelease != 'true'
|
||||||
working-directory: docs/user
|
working-directory: docs/user
|
||||||
run: |
|
run: |
|
||||||
chmod +x ../cleanup-old-versions.sh
|
chmod +x ../cleanup-old-versions.sh
|
||||||
|
|
@ -80,7 +91,7 @@ jobs:
|
||||||
run: npm ci
|
run: npm ci
|
||||||
|
|
||||||
- name: Create developer docs version snapshot on tag
|
- name: Create developer docs version snapshot on tag
|
||||||
if: startsWith(github.ref, 'refs/tags/v')
|
if: startsWith(github.ref, 'refs/tags/v') && steps.taginfo.outputs.is_prerelease != 'true'
|
||||||
working-directory: docs/developer
|
working-directory: docs/developer
|
||||||
run: |
|
run: |
|
||||||
TAG_VERSION=${GITHUB_REF#refs/tags/}
|
TAG_VERSION=${GITHUB_REF#refs/tags/}
|
||||||
|
|
@ -94,7 +105,7 @@ jobs:
|
||||||
fi
|
fi
|
||||||
|
|
||||||
- name: Cleanup old developer docs versions
|
- name: Cleanup old developer docs versions
|
||||||
if: startsWith(github.ref, 'refs/tags/v')
|
if: startsWith(github.ref, 'refs/tags/v') && steps.taginfo.outputs.is_prerelease != 'true'
|
||||||
working-directory: docs/developer
|
working-directory: docs/developer
|
||||||
run: |
|
run: |
|
||||||
chmod +x ../cleanup-old-versions.sh
|
chmod +x ../cleanup-old-versions.sh
|
||||||
|
|
@ -118,7 +129,7 @@ jobs:
|
||||||
|
|
||||||
# COMMIT VERSION SNAPSHOTS
|
# COMMIT VERSION SNAPSHOTS
|
||||||
- name: Commit version snapshots back to repository
|
- name: Commit version snapshots back to repository
|
||||||
if: startsWith(github.ref, 'refs/tags/v')
|
if: startsWith(github.ref, 'refs/tags/v') && steps.taginfo.outputs.is_prerelease != 'true'
|
||||||
run: |
|
run: |
|
||||||
TAG_VERSION=${GITHUB_REF#refs/tags/}
|
TAG_VERSION=${GITHUB_REF#refs/tags/}
|
||||||
|
|
||||||
|
|
@ -140,7 +151,7 @@ jobs:
|
||||||
|
|
||||||
# DEPLOY TO GITHUB PAGES
|
# DEPLOY TO GITHUB PAGES
|
||||||
- name: Setup Pages
|
- name: Setup Pages
|
||||||
uses: actions/configure-pages@v5
|
uses: actions/configure-pages@v6
|
||||||
|
|
||||||
- name: Upload artifact
|
- name: Upload artifact
|
||||||
uses: actions/upload-pages-artifact@v4
|
uses: actions/upload-pages-artifact@v4
|
||||||
|
|
@ -149,4 +160,4 @@ jobs:
|
||||||
|
|
||||||
- name: Deploy to GitHub Pages
|
- name: Deploy to GitHub Pages
|
||||||
id: deployment
|
id: deployment
|
||||||
uses: actions/deploy-pages@v4
|
uses: actions/deploy-pages@v5
|
||||||
|
|
|
||||||
6
.github/workflows/lint.yml
vendored
6
.github/workflows/lint.yml
vendored
|
|
@ -29,12 +29,12 @@ jobs:
|
||||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||||
|
|
||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
|
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
|
||||||
with:
|
with:
|
||||||
python-version: "3.13"
|
python-version: "3.14"
|
||||||
|
|
||||||
- name: Install uv
|
- name: Install uv
|
||||||
uses: astral-sh/setup-uv@681c641aba71e4a1c380be3ab5e12ad51f415867 # v7.1.6
|
uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7.6.0
|
||||||
with:
|
with:
|
||||||
version: "0.9.3"
|
version: "0.9.3"
|
||||||
|
|
||||||
|
|
|
||||||
18
.github/workflows/release.yml
vendored
18
.github/workflows/release.yml
vendored
|
|
@ -135,10 +135,20 @@ jobs:
|
||||||
FEAT=$(echo "$COMMITS" | grep -cE "^feat(\(.+\))?:" || true)
|
FEAT=$(echo "$COMMITS" | grep -cE "^feat(\(.+\))?:" || true)
|
||||||
FIX=$(echo "$COMMITS" | grep -cE "^fix(\(.+\))?:" || true)
|
FIX=$(echo "$COMMITS" | grep -cE "^fix(\(.+\))?:" || true)
|
||||||
|
|
||||||
# Parse versions
|
parse_version() {
|
||||||
|
local version="$1"
|
||||||
|
if [[ $version =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)(b[0-9]+)?$ ]]; then
|
||||||
|
echo "${BASH_REMATCH[1]} ${BASH_REMATCH[2]} ${BASH_REMATCH[3]} ${BASH_REMATCH[4]}"
|
||||||
|
else
|
||||||
|
echo "Invalid version format: $version" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Parse versions (support beta/prerelease suffix like 0.25.0b0)
|
||||||
PREV_VERSION="${PREV_TAG#v}"
|
PREV_VERSION="${PREV_TAG#v}"
|
||||||
IFS='.' read -r PREV_MAJOR PREV_MINOR PREV_PATCH <<< "$PREV_VERSION"
|
read -r PREV_MAJOR PREV_MINOR PREV_PATCH PREV_PRERELEASE <<< "$(parse_version "$PREV_VERSION")"
|
||||||
IFS='.' read -r MAJOR MINOR PATCH <<< "$TAG_VERSION"
|
read -r MAJOR MINOR PATCH PRERELEASE <<< "$(parse_version "$TAG_VERSION")"
|
||||||
|
|
||||||
WARNING=""
|
WARNING=""
|
||||||
SUGGESTION=""
|
SUGGESTION=""
|
||||||
|
|
@ -245,7 +255,7 @@ jobs:
|
||||||
name: ${{ steps.release_notes.outputs.title }}
|
name: ${{ steps.release_notes.outputs.title }}
|
||||||
body: ${{ steps.release_notes.outputs.notes }}
|
body: ${{ steps.release_notes.outputs.notes }}
|
||||||
draft: false
|
draft: false
|
||||||
prerelease: false
|
prerelease: ${{ contains(github.ref, 'b') }}
|
||||||
generate_release_notes: false # We provide our own
|
generate_release_notes: false # We provide our own
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
|
||||||
2
.github/workflows/validate.yml
vendored
2
.github/workflows/validate.yml
vendored
|
|
@ -32,7 +32,7 @@ jobs:
|
||||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||||
|
|
||||||
- name: Run hassfest validation
|
- name: Run hassfest validation
|
||||||
uses: home-assistant/actions/hassfest@87c064c607f3c5cc673a24258d0c98d23033bfc3 # master
|
uses: home-assistant/actions/hassfest@d56d093b9ab8d2105bc0cb6ee9bcc0ef4ec8b96d # master
|
||||||
|
|
||||||
hacs: # https://github.com/hacs/action
|
hacs: # https://github.com/hacs/action
|
||||||
name: HACS validation
|
name: HACS validation
|
||||||
|
|
|
||||||
12
AGENTS.md
12
AGENTS.md
|
|
@ -1838,12 +1838,12 @@ This is a Home Assistant standard to avoid naming conflicts between integrations
|
||||||
# ✅ CORRECT - Integration prefix + semantic purpose
|
# ✅ CORRECT - Integration prefix + semantic purpose
|
||||||
class TibberPricesApiClient: # Integration + semantic role
|
class TibberPricesApiClient: # Integration + semantic role
|
||||||
class TibberPricesDataUpdateCoordinator: # Integration + semantic role
|
class TibberPricesDataUpdateCoordinator: # Integration + semantic role
|
||||||
class TibberPricesDataFetcher: # Integration + semantic role
|
class TibberPricesPriceDataManager: # Integration + semantic role
|
||||||
class TibberPricesSensor: # Integration + entity type
|
class TibberPricesSensor: # Integration + entity type
|
||||||
class TibberPricesEntity: # Integration + entity type
|
class TibberPricesEntity: # Integration + entity type
|
||||||
|
|
||||||
# ❌ INCORRECT - Missing integration prefix
|
# ❌ INCORRECT - Missing integration prefix
|
||||||
class DataFetcher: # Should be: TibberPricesDataFetcher
|
class PriceDataManager: # Should be: TibberPricesPriceDataManager
|
||||||
class TimeService: # Should be: TibberPricesTimeService
|
class TimeService: # Should be: TibberPricesTimeService
|
||||||
class PeriodCalculator: # Should be: TibberPricesPeriodCalculator
|
class PeriodCalculator: # Should be: TibberPricesPeriodCalculator
|
||||||
|
|
||||||
|
|
@ -1855,11 +1855,11 @@ class TibberPricesSensorCalculatorTrend: # Too verbose, import path shows loca
|
||||||
**IMPORTANT:** Do NOT include package hierarchy in class names. Python's import system provides the namespace:
|
**IMPORTANT:** Do NOT include package hierarchy in class names. Python's import system provides the namespace:
|
||||||
```python
|
```python
|
||||||
# The import path IS the full namespace:
|
# The import path IS the full namespace:
|
||||||
from custom_components.tibber_prices.coordinator.data_fetching import TibberPricesDataFetcher
|
from custom_components.tibber_prices.coordinator.price_data_manager import TibberPricesPriceDataManager
|
||||||
from custom_components.tibber_prices.sensor.calculators.trend import TibberPricesTrendCalculator
|
from custom_components.tibber_prices.sensor.calculators.trend import TibberPricesTrendCalculator
|
||||||
|
|
||||||
# Adding package names to class would be redundant:
|
# Adding package names to class would be redundant:
|
||||||
# TibberPricesCoordinatorDataFetcher ❌ NO - unnecessarily verbose
|
# TibberPricesCoordinatorPriceDataManager ❌ NO - unnecessarily verbose
|
||||||
# TibberPricesSensorCalculatorsTrendCalculator ❌ NO - ridiculously long
|
# TibberPricesSensorCalculatorsTrendCalculator ❌ NO - ridiculously long
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
@ -1905,14 +1905,14 @@ result = _InternalHelper().process()
|
||||||
|
|
||||||
**Example of genuine private class use case:**
|
**Example of genuine private class use case:**
|
||||||
```python
|
```python
|
||||||
# In coordinator/data_fetching.py
|
# In coordinator/price_data_manager.py
|
||||||
class _ApiRetryStateMachine:
|
class _ApiRetryStateMachine:
|
||||||
"""Internal state machine for retry logic. Never used outside this file."""
|
"""Internal state machine for retry logic. Never used outside this file."""
|
||||||
def __init__(self, max_retries: int) -> None:
|
def __init__(self, max_retries: int) -> None:
|
||||||
self._attempts = 0
|
self._attempts = 0
|
||||||
self._max_retries = max_retries
|
self._max_retries = max_retries
|
||||||
|
|
||||||
# Only used by DataFetcher methods in this file
|
# Only used by PriceDataManager methods in this file
|
||||||
```
|
```
|
||||||
|
|
||||||
In practice, most "helper" logic should be **functions**, not classes. Reserve classes for stateful components.
|
In practice, most "helper" logic should be **functions**, not classes. Reserve classes for stateful components.
|
||||||
|
|
|
||||||
|
|
@ -49,6 +49,8 @@ logger:
|
||||||
custom_components.tibber_prices.coordinator.period_handlers.period_overlap.details: info
|
custom_components.tibber_prices.coordinator.period_handlers.period_overlap.details: info
|
||||||
# Outlier flex capping
|
# Outlier flex capping
|
||||||
custom_components.tibber_prices.coordinator.period_handlers.core.details: info
|
custom_components.tibber_prices.coordinator.period_handlers.core.details: info
|
||||||
|
# Level filtering details (min_distance scaling)
|
||||||
|
custom_components.tibber_prices.coordinator.period_handlers.level_filtering.details: info
|
||||||
|
|
||||||
# Interval pool details (cache operations, GC):
|
# Interval pool details (cache operations, GC):
|
||||||
# Cache lookup/miss, gap detection, fetch group additions
|
# Cache lookup/miss, gap detection, fetch group additions
|
||||||
|
|
|
||||||
|
|
@ -47,6 +47,8 @@ if TYPE_CHECKING:
|
||||||
PLATFORMS: list[Platform] = [
|
PLATFORMS: list[Platform] = [
|
||||||
Platform.SENSOR,
|
Platform.SENSOR,
|
||||||
Platform.BINARY_SENSOR,
|
Platform.BINARY_SENSOR,
|
||||||
|
Platform.NUMBER,
|
||||||
|
Platform.SWITCH,
|
||||||
]
|
]
|
||||||
|
|
||||||
# Configuration schema for configuration.yaml
|
# Configuration schema for configuration.yaml
|
||||||
|
|
@ -298,6 +300,9 @@ async def async_unload_entry(
|
||||||
await async_save_pool_state(hass, entry.entry_id, pool_state)
|
await async_save_pool_state(hass, entry.entry_id, pool_state)
|
||||||
LOGGER.debug("[%s] Interval pool state saved on unload", entry.title)
|
LOGGER.debug("[%s] Interval pool state saved on unload", entry.title)
|
||||||
|
|
||||||
|
# Shutdown interval pool (cancels background tasks)
|
||||||
|
await entry.runtime_data.interval_pool.async_shutdown()
|
||||||
|
|
||||||
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
|
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
|
||||||
|
|
||||||
if unload_ok and entry.runtime_data is not None:
|
if unload_ok and entry.runtime_data is not None:
|
||||||
|
|
|
||||||
|
|
@ -207,6 +207,8 @@ def add_price_attributes(attributes: dict, current_period: dict, factor: int) ->
|
||||||
attributes["price_max"] = round(current_period["price_max"] * factor, precision)
|
attributes["price_max"] = round(current_period["price_max"] * factor, precision)
|
||||||
if "price_spread" in current_period:
|
if "price_spread" in current_period:
|
||||||
attributes["price_spread"] = round(current_period["price_spread"] * factor, precision)
|
attributes["price_spread"] = round(current_period["price_spread"] * factor, precision)
|
||||||
|
if "price_coefficient_variation_%" in current_period:
|
||||||
|
attributes["price_coefficient_variation_%"] = current_period["price_coefficient_variation_%"]
|
||||||
if "volatility" in current_period:
|
if "volatility" in current_period:
|
||||||
attributes["volatility"] = current_period["volatility"] # Volatility is not a price, keep as-is
|
attributes["volatility"] = current_period["volatility"] # Volatility is not a price, keep as-is
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -14,6 +14,7 @@ from .config_flow_handlers.schemas import (
|
||||||
get_best_price_schema,
|
get_best_price_schema,
|
||||||
get_options_init_schema,
|
get_options_init_schema,
|
||||||
get_peak_price_schema,
|
get_peak_price_schema,
|
||||||
|
get_price_level_schema,
|
||||||
get_price_rating_schema,
|
get_price_rating_schema,
|
||||||
get_price_trend_schema,
|
get_price_trend_schema,
|
||||||
get_reauth_confirm_schema,
|
get_reauth_confirm_schema,
|
||||||
|
|
@ -41,6 +42,7 @@ __all__ = [
|
||||||
"get_best_price_schema",
|
"get_best_price_schema",
|
||||||
"get_options_init_schema",
|
"get_options_init_schema",
|
||||||
"get_peak_price_schema",
|
"get_peak_price_schema",
|
||||||
|
"get_price_level_schema",
|
||||||
"get_price_rating_schema",
|
"get_price_rating_schema",
|
||||||
"get_price_trend_schema",
|
"get_price_trend_schema",
|
||||||
"get_reauth_confirm_schema",
|
"get_reauth_confirm_schema",
|
||||||
|
|
|
||||||
|
|
@ -27,6 +27,7 @@ from custom_components.tibber_prices.config_flow_handlers.schemas import (
|
||||||
get_best_price_schema,
|
get_best_price_schema,
|
||||||
get_options_init_schema,
|
get_options_init_schema,
|
||||||
get_peak_price_schema,
|
get_peak_price_schema,
|
||||||
|
get_price_level_schema,
|
||||||
get_price_rating_schema,
|
get_price_rating_schema,
|
||||||
get_price_trend_schema,
|
get_price_trend_schema,
|
||||||
get_reauth_confirm_schema,
|
get_reauth_confirm_schema,
|
||||||
|
|
@ -56,6 +57,7 @@ __all__ = [
|
||||||
"get_best_price_schema",
|
"get_best_price_schema",
|
||||||
"get_options_init_schema",
|
"get_options_init_schema",
|
||||||
"get_peak_price_schema",
|
"get_peak_price_schema",
|
||||||
|
"get_price_level_schema",
|
||||||
"get_price_rating_schema",
|
"get_price_rating_schema",
|
||||||
"get_price_trend_schema",
|
"get_price_trend_schema",
|
||||||
"get_reauth_confirm_schema",
|
"get_reauth_confirm_schema",
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,243 @@
|
||||||
|
"""
|
||||||
|
Entity check utilities for options flow.
|
||||||
|
|
||||||
|
This module provides functions to check if relevant entities are enabled
|
||||||
|
for specific options flow steps. If no relevant entities are enabled,
|
||||||
|
a warning can be displayed to users.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from typing import TYPE_CHECKING
|
||||||
|
|
||||||
|
from custom_components.tibber_prices.const import DOMAIN
|
||||||
|
from homeassistant.helpers.entity_registry import async_get as async_get_entity_registry
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from homeassistant.config_entries import ConfigEntry
|
||||||
|
from homeassistant.core import HomeAssistant
|
||||||
|
|
||||||
|
_LOGGER = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# Maximum number of example sensors to show in warning message
|
||||||
|
MAX_EXAMPLE_SENSORS = 3
|
||||||
|
# Threshold for using "and" vs "," in formatted names
|
||||||
|
NAMES_SIMPLE_JOIN_THRESHOLD = 2
|
||||||
|
|
||||||
|
# Mapping of options flow steps to affected sensor keys
|
||||||
|
# These are the entity keys (from sensor/definitions.py and binary_sensor/definitions.py)
|
||||||
|
# that are affected by each settings page
|
||||||
|
STEP_TO_SENSOR_KEYS: dict[str, list[str]] = {
|
||||||
|
# Price Rating settings affect all rating sensors
|
||||||
|
"current_interval_price_rating": [
|
||||||
|
# Interval rating sensors
|
||||||
|
"current_interval_price_rating",
|
||||||
|
"next_interval_price_rating",
|
||||||
|
"previous_interval_price_rating",
|
||||||
|
# Rolling hour rating sensors
|
||||||
|
"current_hour_price_rating",
|
||||||
|
"next_hour_price_rating",
|
||||||
|
# Daily rating sensors
|
||||||
|
"yesterday_price_rating",
|
||||||
|
"today_price_rating",
|
||||||
|
"tomorrow_price_rating",
|
||||||
|
],
|
||||||
|
# Price Level settings affect level sensors and period binary sensors
|
||||||
|
"price_level": [
|
||||||
|
# Interval level sensors
|
||||||
|
"current_interval_price_level",
|
||||||
|
"next_interval_price_level",
|
||||||
|
"previous_interval_price_level",
|
||||||
|
# Rolling hour level sensors
|
||||||
|
"current_hour_price_level",
|
||||||
|
"next_hour_price_level",
|
||||||
|
# Daily level sensors
|
||||||
|
"yesterday_price_level",
|
||||||
|
"today_price_level",
|
||||||
|
"tomorrow_price_level",
|
||||||
|
# Binary sensors that use level filtering
|
||||||
|
"best_price_period",
|
||||||
|
"peak_price_period",
|
||||||
|
],
|
||||||
|
# Volatility settings affect volatility sensors
|
||||||
|
"volatility": [
|
||||||
|
"today_volatility",
|
||||||
|
"tomorrow_volatility",
|
||||||
|
"next_24h_volatility",
|
||||||
|
"today_tomorrow_volatility",
|
||||||
|
# Also affects trend sensors (adaptive thresholds)
|
||||||
|
"current_price_trend",
|
||||||
|
"next_price_trend_change",
|
||||||
|
"price_trend_1h",
|
||||||
|
"price_trend_2h",
|
||||||
|
"price_trend_3h",
|
||||||
|
"price_trend_4h",
|
||||||
|
"price_trend_5h",
|
||||||
|
"price_trend_6h",
|
||||||
|
"price_trend_8h",
|
||||||
|
"price_trend_12h",
|
||||||
|
],
|
||||||
|
# Best Price settings affect best price binary sensor and timing sensors
|
||||||
|
"best_price": [
|
||||||
|
# Binary sensor
|
||||||
|
"best_price_period",
|
||||||
|
# Timing sensors
|
||||||
|
"best_price_end_time",
|
||||||
|
"best_price_period_duration",
|
||||||
|
"best_price_remaining_minutes",
|
||||||
|
"best_price_progress",
|
||||||
|
"best_price_next_start_time",
|
||||||
|
"best_price_next_in_minutes",
|
||||||
|
],
|
||||||
|
# Peak Price settings affect peak price binary sensor and timing sensors
|
||||||
|
"peak_price": [
|
||||||
|
# Binary sensor
|
||||||
|
"peak_price_period",
|
||||||
|
# Timing sensors
|
||||||
|
"peak_price_end_time",
|
||||||
|
"peak_price_period_duration",
|
||||||
|
"peak_price_remaining_minutes",
|
||||||
|
"peak_price_progress",
|
||||||
|
"peak_price_next_start_time",
|
||||||
|
"peak_price_next_in_minutes",
|
||||||
|
],
|
||||||
|
# Price Trend settings affect trend sensors
|
||||||
|
"price_trend": [
|
||||||
|
"current_price_trend",
|
||||||
|
"next_price_trend_change",
|
||||||
|
"price_trend_1h",
|
||||||
|
"price_trend_2h",
|
||||||
|
"price_trend_3h",
|
||||||
|
"price_trend_4h",
|
||||||
|
"price_trend_5h",
|
||||||
|
"price_trend_6h",
|
||||||
|
"price_trend_8h",
|
||||||
|
"price_trend_12h",
|
||||||
|
],
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def check_relevant_entities_enabled(
|
||||||
|
hass: HomeAssistant,
|
||||||
|
config_entry: ConfigEntry,
|
||||||
|
step_id: str,
|
||||||
|
) -> tuple[bool, list[str]]:
|
||||||
|
"""
|
||||||
|
Check if any relevant entities for a settings step are enabled.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
hass: Home Assistant instance
|
||||||
|
config_entry: Current config entry
|
||||||
|
step_id: The options flow step ID
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Tuple of (has_enabled_entities, list_of_example_sensor_names)
|
||||||
|
- has_enabled_entities: True if at least one relevant entity is enabled
|
||||||
|
- list_of_example_sensor_names: List of example sensor keys for the warning message
|
||||||
|
|
||||||
|
"""
|
||||||
|
sensor_keys = STEP_TO_SENSOR_KEYS.get(step_id)
|
||||||
|
if not sensor_keys:
|
||||||
|
# No mapping for this step - no check needed
|
||||||
|
return True, []
|
||||||
|
|
||||||
|
entity_registry = async_get_entity_registry(hass)
|
||||||
|
entry_id = config_entry.entry_id
|
||||||
|
|
||||||
|
enabled_count = 0
|
||||||
|
example_sensors: list[str] = []
|
||||||
|
|
||||||
|
for entity in entity_registry.entities.values():
|
||||||
|
# Check if entity belongs to our integration and config entry
|
||||||
|
if entity.config_entry_id != entry_id:
|
||||||
|
continue
|
||||||
|
if entity.platform != DOMAIN:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Extract the sensor key from unique_id
|
||||||
|
# unique_id format: "{home_id}_{sensor_key}" or "{entry_id}_{sensor_key}"
|
||||||
|
unique_id = entity.unique_id or ""
|
||||||
|
# The sensor key is after the last underscore that separates the ID prefix
|
||||||
|
# We check if any of our target keys is contained in the unique_id
|
||||||
|
for sensor_key in sensor_keys:
|
||||||
|
if unique_id.endswith(f"_{sensor_key}") or unique_id == sensor_key:
|
||||||
|
# Found a matching entity
|
||||||
|
if entity.disabled_by is None:
|
||||||
|
# Entity is enabled
|
||||||
|
enabled_count += 1
|
||||||
|
break
|
||||||
|
# Entity is disabled - add to examples (max MAX_EXAMPLE_SENSORS)
|
||||||
|
if len(example_sensors) < MAX_EXAMPLE_SENSORS and sensor_key not in example_sensors:
|
||||||
|
example_sensors.append(sensor_key)
|
||||||
|
break
|
||||||
|
|
||||||
|
# If we found enabled entities, return success
|
||||||
|
if enabled_count > 0:
|
||||||
|
return True, []
|
||||||
|
|
||||||
|
# No enabled entities - return the example sensors for the warning
|
||||||
|
# If we haven't collected any examples yet, use the first from the mapping
|
||||||
|
if not example_sensors:
|
||||||
|
example_sensors = sensor_keys[:MAX_EXAMPLE_SENSORS]
|
||||||
|
|
||||||
|
return False, example_sensors
|
||||||
|
|
||||||
|
|
||||||
|
def format_sensor_names_for_warning(sensor_keys: list[str]) -> str:
|
||||||
|
"""
|
||||||
|
Format sensor keys into human-readable names for warning message.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
sensor_keys: List of sensor keys
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Formatted string like "Best Price Period, Best Price End Time, ..."
|
||||||
|
|
||||||
|
"""
|
||||||
|
# Convert snake_case keys to Title Case names
|
||||||
|
names = []
|
||||||
|
for key in sensor_keys:
|
||||||
|
# Replace underscores with spaces and title case
|
||||||
|
name = key.replace("_", " ").title()
|
||||||
|
names.append(name)
|
||||||
|
|
||||||
|
if len(names) <= NAMES_SIMPLE_JOIN_THRESHOLD:
|
||||||
|
return " and ".join(names)
|
||||||
|
|
||||||
|
return ", ".join(names[:-1]) + ", and " + names[-1]
|
||||||
|
|
||||||
|
|
||||||
|
def check_chart_data_export_enabled(
|
||||||
|
hass: HomeAssistant,
|
||||||
|
config_entry: ConfigEntry,
|
||||||
|
) -> bool:
|
||||||
|
"""
|
||||||
|
Check if the Chart Data Export sensor is enabled.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
hass: Home Assistant instance
|
||||||
|
config_entry: Current config entry
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if the Chart Data Export sensor is enabled, False otherwise
|
||||||
|
|
||||||
|
"""
|
||||||
|
entity_registry = async_get_entity_registry(hass)
|
||||||
|
entry_id = config_entry.entry_id
|
||||||
|
|
||||||
|
for entity in entity_registry.entities.values():
|
||||||
|
# Check if entity belongs to our integration and config entry
|
||||||
|
if entity.config_entry_id != entry_id:
|
||||||
|
continue
|
||||||
|
if entity.platform != DOMAIN:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Check for chart_data_export sensor
|
||||||
|
unique_id = entity.unique_id or ""
|
||||||
|
if unique_id.endswith("_chart_data_export") or unique_id == "chart_data_export":
|
||||||
|
# Found the entity - check if enabled
|
||||||
|
return entity.disabled_by is None
|
||||||
|
|
||||||
|
# Entity not found (shouldn't happen, but treat as disabled)
|
||||||
|
return False
|
||||||
|
|
@ -9,12 +9,19 @@ from typing import TYPE_CHECKING, Any
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from collections.abc import Mapping
|
from collections.abc import Mapping
|
||||||
|
|
||||||
|
from custom_components.tibber_prices.config_flow_handlers.entity_check import (
|
||||||
|
check_chart_data_export_enabled,
|
||||||
|
check_relevant_entities_enabled,
|
||||||
|
format_sensor_names_for_warning,
|
||||||
|
)
|
||||||
from custom_components.tibber_prices.config_flow_handlers.schemas import (
|
from custom_components.tibber_prices.config_flow_handlers.schemas import (
|
||||||
|
ConfigOverrides,
|
||||||
get_best_price_schema,
|
get_best_price_schema,
|
||||||
get_chart_data_export_schema,
|
get_chart_data_export_schema,
|
||||||
get_display_settings_schema,
|
get_display_settings_schema,
|
||||||
get_options_init_schema,
|
get_options_init_schema,
|
||||||
get_peak_price_schema,
|
get_peak_price_schema,
|
||||||
|
get_price_level_schema,
|
||||||
get_price_rating_schema,
|
get_price_rating_schema,
|
||||||
get_price_trend_schema,
|
get_price_trend_schema,
|
||||||
get_reset_to_defaults_schema,
|
get_reset_to_defaults_schema,
|
||||||
|
|
@ -32,6 +39,8 @@ from custom_components.tibber_prices.config_flow_handlers.validators import (
|
||||||
validate_price_rating_thresholds,
|
validate_price_rating_thresholds,
|
||||||
validate_price_trend_falling,
|
validate_price_trend_falling,
|
||||||
validate_price_trend_rising,
|
validate_price_trend_rising,
|
||||||
|
validate_price_trend_strongly_falling,
|
||||||
|
validate_price_trend_strongly_rising,
|
||||||
validate_relaxation_attempts,
|
validate_relaxation_attempts,
|
||||||
validate_volatility_threshold_high,
|
validate_volatility_threshold_high,
|
||||||
validate_volatility_threshold_moderate,
|
validate_volatility_threshold_moderate,
|
||||||
|
|
@ -53,6 +62,8 @@ from custom_components.tibber_prices.const import (
|
||||||
CONF_PRICE_RATING_THRESHOLD_LOW,
|
CONF_PRICE_RATING_THRESHOLD_LOW,
|
||||||
CONF_PRICE_TREND_THRESHOLD_FALLING,
|
CONF_PRICE_TREND_THRESHOLD_FALLING,
|
||||||
CONF_PRICE_TREND_THRESHOLD_RISING,
|
CONF_PRICE_TREND_THRESHOLD_RISING,
|
||||||
|
CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING,
|
||||||
|
CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING,
|
||||||
CONF_RELAXATION_ATTEMPTS_BEST,
|
CONF_RELAXATION_ATTEMPTS_BEST,
|
||||||
CONF_RELAXATION_ATTEMPTS_PEAK,
|
CONF_RELAXATION_ATTEMPTS_PEAK,
|
||||||
CONF_VOLATILITY_THRESHOLD_HIGH,
|
CONF_VOLATILITY_THRESHOLD_HIGH,
|
||||||
|
|
@ -62,9 +73,11 @@ from custom_components.tibber_prices.const import (
|
||||||
DEFAULT_VOLATILITY_THRESHOLD_MODERATE,
|
DEFAULT_VOLATILITY_THRESHOLD_MODERATE,
|
||||||
DEFAULT_VOLATILITY_THRESHOLD_VERY_HIGH,
|
DEFAULT_VOLATILITY_THRESHOLD_VERY_HIGH,
|
||||||
DOMAIN,
|
DOMAIN,
|
||||||
|
async_get_translation,
|
||||||
get_default_options,
|
get_default_options,
|
||||||
)
|
)
|
||||||
from homeassistant.config_entries import ConfigFlowResult, OptionsFlow
|
from homeassistant.config_entries import ConfigFlowResult, OptionsFlow
|
||||||
|
from homeassistant.helpers import entity_registry as er
|
||||||
|
|
||||||
_LOGGER = logging.getLogger(__name__)
|
_LOGGER = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
@ -178,6 +191,221 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
||||||
return True
|
return True
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
def _get_entity_warning_placeholders(self, step_id: str) -> dict[str, str]:
|
||||||
|
"""
|
||||||
|
Get description placeholders for entity availability warning.
|
||||||
|
|
||||||
|
Checks if any relevant entities for the step are enabled.
|
||||||
|
If not, adds a warning placeholder to display in the form description.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
step_id: The options flow step ID
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dictionary with placeholder keys for the form description
|
||||||
|
|
||||||
|
"""
|
||||||
|
has_enabled, example_sensors = check_relevant_entities_enabled(self.hass, self.config_entry, step_id)
|
||||||
|
|
||||||
|
if has_enabled:
|
||||||
|
# No warning needed - return empty placeholder
|
||||||
|
return {"entity_warning": ""}
|
||||||
|
|
||||||
|
# Build warning message with example sensor names
|
||||||
|
sensor_names = format_sensor_names_for_warning(example_sensors)
|
||||||
|
return {
|
||||||
|
"entity_warning": f"\n\n⚠️ **Note:** No sensors affected by these settings are currently enabled. "
|
||||||
|
f"To use these settings, first enable relevant sensors like *{sensor_names}* "
|
||||||
|
f"in **Settings → Devices & Services → Tibber Prices → Entities**."
|
||||||
|
}
|
||||||
|
|
||||||
|
def _get_enabled_config_entities(self) -> set[str]:
|
||||||
|
"""
|
||||||
|
Get config keys that have their config entity enabled.
|
||||||
|
|
||||||
|
Checks the entity registry for number/switch entities that override
|
||||||
|
config values. Returns the config_key for each enabled entity.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Set of config keys (e.g., "best_price_flex", "enable_min_periods_best")
|
||||||
|
|
||||||
|
"""
|
||||||
|
enabled_keys: set[str] = set()
|
||||||
|
ent_reg = er.async_get(self.hass)
|
||||||
|
|
||||||
|
_LOGGER.debug(
|
||||||
|
"Checking for enabled config override entities for entry %s",
|
||||||
|
self.config_entry.entry_id,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Map entity keys to their config keys
|
||||||
|
# Entity keys are defined in number/definitions.py and switch/definitions.py
|
||||||
|
override_entities = {
|
||||||
|
# Number entities (best price)
|
||||||
|
"number.best_price_flex_override": "best_price_flex",
|
||||||
|
"number.best_price_min_distance_override": "best_price_min_distance_from_avg",
|
||||||
|
"number.best_price_min_period_length_override": "best_price_min_period_length",
|
||||||
|
"number.best_price_min_periods_override": "min_periods_best",
|
||||||
|
"number.best_price_relaxation_attempts_override": "relaxation_attempts_best",
|
||||||
|
"number.best_price_gap_count_override": "best_price_max_level_gap_count",
|
||||||
|
# Number entities (peak price)
|
||||||
|
"number.peak_price_flex_override": "peak_price_flex",
|
||||||
|
"number.peak_price_min_distance_override": "peak_price_min_distance_from_avg",
|
||||||
|
"number.peak_price_min_period_length_override": "peak_price_min_period_length",
|
||||||
|
"number.peak_price_min_periods_override": "min_periods_peak",
|
||||||
|
"number.peak_price_relaxation_attempts_override": "relaxation_attempts_peak",
|
||||||
|
"number.peak_price_gap_count_override": "peak_price_max_level_gap_count",
|
||||||
|
# Switch entities
|
||||||
|
"switch.best_price_enable_relaxation_override": "enable_min_periods_best",
|
||||||
|
"switch.peak_price_enable_relaxation_override": "enable_min_periods_peak",
|
||||||
|
}
|
||||||
|
|
||||||
|
# Check each possible override entity
|
||||||
|
for entity_id_suffix, config_key in override_entities.items():
|
||||||
|
# Entity IDs include device name, so we need to search by unique_id pattern
|
||||||
|
# The unique_id follows pattern: {config_entry_id}_{entity_key}
|
||||||
|
domain, entity_key = entity_id_suffix.split(".", 1)
|
||||||
|
|
||||||
|
# Find entity by iterating through registry
|
||||||
|
for entity_entry in ent_reg.entities.values():
|
||||||
|
if (
|
||||||
|
entity_entry.domain == domain
|
||||||
|
and entity_entry.config_entry_id == self.config_entry.entry_id
|
||||||
|
and entity_entry.unique_id
|
||||||
|
and entity_entry.unique_id.endswith(entity_key)
|
||||||
|
and not entity_entry.disabled
|
||||||
|
):
|
||||||
|
_LOGGER.debug(
|
||||||
|
"Found enabled config override entity: %s -> config_key=%s",
|
||||||
|
entity_entry.entity_id,
|
||||||
|
config_key,
|
||||||
|
)
|
||||||
|
enabled_keys.add(config_key)
|
||||||
|
break
|
||||||
|
|
||||||
|
_LOGGER.debug("Enabled config override keys: %s", enabled_keys)
|
||||||
|
return enabled_keys
|
||||||
|
|
||||||
|
def _get_active_overrides(self) -> ConfigOverrides:
|
||||||
|
"""
|
||||||
|
Build override dict from enabled config entities.
|
||||||
|
|
||||||
|
Returns a dict structure compatible with schema functions.
|
||||||
|
"""
|
||||||
|
enabled_keys = self._get_enabled_config_entities()
|
||||||
|
if not enabled_keys:
|
||||||
|
_LOGGER.debug("No enabled config override entities found")
|
||||||
|
return {}
|
||||||
|
|
||||||
|
# Build structure expected by schema: {section: {key: True}}
|
||||||
|
# Section doesn't matter for read_only check, we just need the key present
|
||||||
|
overrides: ConfigOverrides = {"_enabled": {}}
|
||||||
|
for key in enabled_keys:
|
||||||
|
overrides["_enabled"][key] = True
|
||||||
|
|
||||||
|
_LOGGER.debug("Active overrides structure: %s", overrides)
|
||||||
|
return overrides
|
||||||
|
|
||||||
|
def _get_override_warning_placeholder(self, step_id: str, overrides: ConfigOverrides) -> dict[str, str]:
|
||||||
|
"""
|
||||||
|
Get description placeholder for config override warning.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
step_id: The options flow step ID (e.g., "best_price", "peak_price")
|
||||||
|
overrides: Active overrides dictionary
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dictionary with 'override_warning' placeholder
|
||||||
|
|
||||||
|
"""
|
||||||
|
# Define which config keys belong to each step
|
||||||
|
step_keys: dict[str, set[str]] = {
|
||||||
|
"best_price": {
|
||||||
|
"best_price_flex",
|
||||||
|
"best_price_min_distance_from_avg",
|
||||||
|
"best_price_min_period_length",
|
||||||
|
"min_periods_best",
|
||||||
|
"relaxation_attempts_best",
|
||||||
|
"enable_min_periods_best",
|
||||||
|
},
|
||||||
|
"peak_price": {
|
||||||
|
"peak_price_flex",
|
||||||
|
"peak_price_min_distance_from_avg",
|
||||||
|
"peak_price_min_period_length",
|
||||||
|
"min_periods_peak",
|
||||||
|
"relaxation_attempts_peak",
|
||||||
|
"enable_min_periods_peak",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
keys_to_check = step_keys.get(step_id, set())
|
||||||
|
enabled_keys = overrides.get("_enabled", {})
|
||||||
|
override_count = sum(1 for k in enabled_keys if k in keys_to_check)
|
||||||
|
|
||||||
|
if override_count > 0:
|
||||||
|
field_word = "field is" if override_count == 1 else "fields are"
|
||||||
|
return {
|
||||||
|
"override_warning": (
|
||||||
|
f"\n\n🔒 **{override_count} {field_word} managed by configuration entities** "
|
||||||
|
"(grayed out). Disable the config entity to edit here, "
|
||||||
|
"or change the value directly via the entity."
|
||||||
|
)
|
||||||
|
}
|
||||||
|
return {"override_warning": ""}
|
||||||
|
|
||||||
|
async def _get_override_translations(self) -> dict[str, Any]:
|
||||||
|
"""
|
||||||
|
Load override translations from common section.
|
||||||
|
|
||||||
|
Uses the system language setting from Home Assistant.
|
||||||
|
Note: HA Options Flow does not provide user_id in context,
|
||||||
|
so we cannot determine the individual user's language preference.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dictionary with override_warning_template, override_warning_and,
|
||||||
|
and override_field_label_* keys for each config field.
|
||||||
|
|
||||||
|
"""
|
||||||
|
# Use system language - HA Options Flow context doesn't include user_id
|
||||||
|
language = self.hass.config.language or "en"
|
||||||
|
_LOGGER.debug("Loading override translations for language: %s", language)
|
||||||
|
translations: dict[str, Any] = {}
|
||||||
|
|
||||||
|
# Load template and connector from common section
|
||||||
|
template = await async_get_translation(self.hass, ["common", "override_warning_template"], language)
|
||||||
|
_LOGGER.debug("Loaded template: %s", template)
|
||||||
|
if template:
|
||||||
|
translations["override_warning_template"] = template
|
||||||
|
|
||||||
|
and_connector = await async_get_translation(self.hass, ["common", "override_warning_and"], language)
|
||||||
|
if and_connector:
|
||||||
|
translations["override_warning_and"] = and_connector
|
||||||
|
|
||||||
|
# Load flat field label translations
|
||||||
|
field_keys = [
|
||||||
|
"best_price_min_period_length",
|
||||||
|
"best_price_max_level_gap_count",
|
||||||
|
"best_price_flex",
|
||||||
|
"best_price_min_distance_from_avg",
|
||||||
|
"enable_min_periods_best",
|
||||||
|
"min_periods_best",
|
||||||
|
"relaxation_attempts_best",
|
||||||
|
"peak_price_min_period_length",
|
||||||
|
"peak_price_max_level_gap_count",
|
||||||
|
"peak_price_flex",
|
||||||
|
"peak_price_min_distance_from_avg",
|
||||||
|
"enable_min_periods_peak",
|
||||||
|
"min_periods_peak",
|
||||||
|
"relaxation_attempts_peak",
|
||||||
|
]
|
||||||
|
for field_key in field_keys:
|
||||||
|
translation_key = f"override_field_label_{field_key}"
|
||||||
|
label = await async_get_translation(self.hass, ["common", translation_key], language)
|
||||||
|
if label:
|
||||||
|
translations[translation_key] = label
|
||||||
|
|
||||||
|
return translations
|
||||||
|
|
||||||
async def async_step_init(self, _user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
async def async_step_init(self, _user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
||||||
"""Manage the options - show menu."""
|
"""Manage the options - show menu."""
|
||||||
# Always reload options from config_entry to get latest saved state
|
# Always reload options from config_entry to get latest saved state
|
||||||
|
|
@ -191,6 +419,7 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
||||||
"general_settings",
|
"general_settings",
|
||||||
"display_settings",
|
"display_settings",
|
||||||
"current_interval_price_rating",
|
"current_interval_price_rating",
|
||||||
|
"price_level",
|
||||||
"volatility",
|
"volatility",
|
||||||
"best_price",
|
"best_price",
|
||||||
"peak_price",
|
"peak_price",
|
||||||
|
|
@ -327,6 +556,27 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
||||||
step_id="current_interval_price_rating",
|
step_id="current_interval_price_rating",
|
||||||
data_schema=get_price_rating_schema(self.config_entry.options),
|
data_schema=get_price_rating_schema(self.config_entry.options),
|
||||||
errors=errors,
|
errors=errors,
|
||||||
|
description_placeholders=self._get_entity_warning_placeholders("current_interval_price_rating"),
|
||||||
|
)
|
||||||
|
|
||||||
|
async def async_step_price_level(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
||||||
|
"""Configure Tibber price level gap tolerance (smoothing for API 'level' field)."""
|
||||||
|
errors: dict[str, str] = {}
|
||||||
|
|
||||||
|
if user_input is not None:
|
||||||
|
# No validation needed - slider constraints ensure valid range
|
||||||
|
# Store flat data directly in options
|
||||||
|
self._options.update(user_input)
|
||||||
|
# async_create_entry automatically handles change detection and listener triggering
|
||||||
|
self._save_options_if_changed()
|
||||||
|
# Return to menu for more changes
|
||||||
|
return await self.async_step_init()
|
||||||
|
|
||||||
|
return self.async_show_form(
|
||||||
|
step_id="price_level",
|
||||||
|
data_schema=get_price_level_schema(self.config_entry.options),
|
||||||
|
errors=errors,
|
||||||
|
description_placeholders=self._get_entity_warning_placeholders("price_level"),
|
||||||
)
|
)
|
||||||
|
|
||||||
async def async_step_best_price(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
async def async_step_best_price(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
||||||
|
|
@ -386,10 +636,22 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
||||||
# Return to menu for more changes
|
# Return to menu for more changes
|
||||||
return await self.async_step_init()
|
return await self.async_step_init()
|
||||||
|
|
||||||
|
overrides = self._get_active_overrides()
|
||||||
|
placeholders = self._get_entity_warning_placeholders("best_price")
|
||||||
|
placeholders.update(self._get_override_warning_placeholder("best_price", overrides))
|
||||||
|
|
||||||
|
# Load translations for override warnings
|
||||||
|
override_translations = await self._get_override_translations()
|
||||||
|
|
||||||
return self.async_show_form(
|
return self.async_show_form(
|
||||||
step_id="best_price",
|
step_id="best_price",
|
||||||
data_schema=get_best_price_schema(self.config_entry.options),
|
data_schema=get_best_price_schema(
|
||||||
|
self.config_entry.options,
|
||||||
|
overrides=overrides,
|
||||||
|
translations=override_translations,
|
||||||
|
),
|
||||||
errors=errors,
|
errors=errors,
|
||||||
|
description_placeholders=placeholders,
|
||||||
)
|
)
|
||||||
|
|
||||||
async def async_step_peak_price(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
async def async_step_peak_price(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
||||||
|
|
@ -446,10 +708,22 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
||||||
# Return to menu for more changes
|
# Return to menu for more changes
|
||||||
return await self.async_step_init()
|
return await self.async_step_init()
|
||||||
|
|
||||||
|
overrides = self._get_active_overrides()
|
||||||
|
placeholders = self._get_entity_warning_placeholders("peak_price")
|
||||||
|
placeholders.update(self._get_override_warning_placeholder("peak_price", overrides))
|
||||||
|
|
||||||
|
# Load translations for override warnings
|
||||||
|
override_translations = await self._get_override_translations()
|
||||||
|
|
||||||
return self.async_show_form(
|
return self.async_show_form(
|
||||||
step_id="peak_price",
|
step_id="peak_price",
|
||||||
data_schema=get_peak_price_schema(self.config_entry.options),
|
data_schema=get_peak_price_schema(
|
||||||
|
self.config_entry.options,
|
||||||
|
overrides=overrides,
|
||||||
|
translations=override_translations,
|
||||||
|
),
|
||||||
errors=errors,
|
errors=errors,
|
||||||
|
description_placeholders=placeholders,
|
||||||
)
|
)
|
||||||
|
|
||||||
async def async_step_price_trend(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
async def async_step_price_trend(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
||||||
|
|
@ -472,6 +746,34 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
||||||
):
|
):
|
||||||
errors[CONF_PRICE_TREND_THRESHOLD_FALLING] = "invalid_price_trend_falling"
|
errors[CONF_PRICE_TREND_THRESHOLD_FALLING] = "invalid_price_trend_falling"
|
||||||
|
|
||||||
|
# Validate strongly rising trend threshold
|
||||||
|
if CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING in user_input and not validate_price_trend_strongly_rising(
|
||||||
|
user_input[CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING]
|
||||||
|
):
|
||||||
|
errors[CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING] = "invalid_price_trend_strongly_rising"
|
||||||
|
|
||||||
|
# Validate strongly falling trend threshold
|
||||||
|
if CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING in user_input and not validate_price_trend_strongly_falling(
|
||||||
|
user_input[CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING]
|
||||||
|
):
|
||||||
|
errors[CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING] = "invalid_price_trend_strongly_falling"
|
||||||
|
|
||||||
|
# Cross-validation: Ensure rising < strongly_rising and falling > strongly_falling
|
||||||
|
if not errors:
|
||||||
|
rising = user_input.get(CONF_PRICE_TREND_THRESHOLD_RISING)
|
||||||
|
strongly_rising = user_input.get(CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING)
|
||||||
|
falling = user_input.get(CONF_PRICE_TREND_THRESHOLD_FALLING)
|
||||||
|
strongly_falling = user_input.get(CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING)
|
||||||
|
|
||||||
|
if rising is not None and strongly_rising is not None and rising >= strongly_rising:
|
||||||
|
errors[CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING] = (
|
||||||
|
"invalid_trend_strongly_rising_less_than_rising"
|
||||||
|
)
|
||||||
|
if falling is not None and strongly_falling is not None and falling <= strongly_falling:
|
||||||
|
errors[CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING] = (
|
||||||
|
"invalid_trend_strongly_falling_greater_than_falling"
|
||||||
|
)
|
||||||
|
|
||||||
if not errors:
|
if not errors:
|
||||||
# Store flat data directly in options (no section wrapping)
|
# Store flat data directly in options (no section wrapping)
|
||||||
self._options.update(user_input)
|
self._options.update(user_input)
|
||||||
|
|
@ -484,6 +786,7 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
||||||
step_id="price_trend",
|
step_id="price_trend",
|
||||||
data_schema=get_price_trend_schema(self.config_entry.options),
|
data_schema=get_price_trend_schema(self.config_entry.options),
|
||||||
errors=errors,
|
errors=errors,
|
||||||
|
description_placeholders=self._get_entity_warning_placeholders("price_trend"),
|
||||||
)
|
)
|
||||||
|
|
||||||
async def async_step_chart_data_export(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
async def async_step_chart_data_export(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
||||||
|
|
@ -492,10 +795,44 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
||||||
# No changes to save - just return to menu
|
# No changes to save - just return to menu
|
||||||
return await self.async_step_init()
|
return await self.async_step_init()
|
||||||
|
|
||||||
# Show info-only form (no input fields)
|
# Check if the chart data export sensor is enabled
|
||||||
|
is_enabled = check_chart_data_export_enabled(self.hass, self.config_entry)
|
||||||
|
|
||||||
|
# Show info-only form with status-dependent description
|
||||||
return self.async_show_form(
|
return self.async_show_form(
|
||||||
step_id="chart_data_export",
|
step_id="chart_data_export",
|
||||||
data_schema=get_chart_data_export_schema(self.config_entry.options),
|
data_schema=get_chart_data_export_schema(self.config_entry.options),
|
||||||
|
description_placeholders={
|
||||||
|
"sensor_status_info": self._get_chart_export_status_info(is_enabled=is_enabled),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
def _get_chart_export_status_info(self, *, is_enabled: bool) -> str:
|
||||||
|
"""Get the status info block for chart data export sensor."""
|
||||||
|
if is_enabled:
|
||||||
|
return (
|
||||||
|
"✅ **Status: Sensor is enabled**\n\n"
|
||||||
|
"The Chart Data Export sensor is currently active and providing data as attributes.\n\n"
|
||||||
|
"**Configuration (optional):**\n\n"
|
||||||
|
"Default settings work out-of-the-box (today+tomorrow, 15-minute intervals, prices only).\n\n"
|
||||||
|
"For customization, add to **`configuration.yaml`**:\n\n"
|
||||||
|
"```yaml\n"
|
||||||
|
"tibber_prices:\n"
|
||||||
|
" chart_export:\n"
|
||||||
|
" day:\n"
|
||||||
|
" - today\n"
|
||||||
|
" - tomorrow\n"
|
||||||
|
" include_level: true\n"
|
||||||
|
" include_rating_level: true\n"
|
||||||
|
"```\n\n"
|
||||||
|
"**All parameters:** See `tibber_prices.get_chartdata` service documentation"
|
||||||
|
)
|
||||||
|
return (
|
||||||
|
"❌ **Status: Sensor is disabled**\n\n"
|
||||||
|
"**Enable the sensor:**\n\n"
|
||||||
|
"1. Open **Settings → Devices & Services → Tibber Prices**\n"
|
||||||
|
"2. Select your home → Find **'Chart Data Export'** (Diagnostic section)\n"
|
||||||
|
"3. **Enable the sensor** (disabled by default)"
|
||||||
)
|
)
|
||||||
|
|
||||||
async def async_step_volatility(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
async def async_step_volatility(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
||||||
|
|
@ -554,4 +891,5 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
||||||
step_id="volatility",
|
step_id="volatility",
|
||||||
data_schema=get_volatility_schema(self.config_entry.options),
|
data_schema=get_volatility_schema(self.config_entry.options),
|
||||||
errors=errors,
|
errors=errors,
|
||||||
|
description_placeholders=self._get_entity_warning_placeholders("volatility"),
|
||||||
)
|
)
|
||||||
|
|
|
||||||
|
|
@ -28,10 +28,15 @@ from custom_components.tibber_prices.const import (
|
||||||
CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||||
CONF_PEAK_PRICE_MIN_LEVEL,
|
CONF_PEAK_PRICE_MIN_LEVEL,
|
||||||
CONF_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
CONF_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
||||||
|
CONF_PRICE_LEVEL_GAP_TOLERANCE,
|
||||||
|
CONF_PRICE_RATING_GAP_TOLERANCE,
|
||||||
|
CONF_PRICE_RATING_HYSTERESIS,
|
||||||
CONF_PRICE_RATING_THRESHOLD_HIGH,
|
CONF_PRICE_RATING_THRESHOLD_HIGH,
|
||||||
CONF_PRICE_RATING_THRESHOLD_LOW,
|
CONF_PRICE_RATING_THRESHOLD_LOW,
|
||||||
CONF_PRICE_TREND_THRESHOLD_FALLING,
|
CONF_PRICE_TREND_THRESHOLD_FALLING,
|
||||||
CONF_PRICE_TREND_THRESHOLD_RISING,
|
CONF_PRICE_TREND_THRESHOLD_RISING,
|
||||||
|
CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING,
|
||||||
|
CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING,
|
||||||
CONF_RELAXATION_ATTEMPTS_BEST,
|
CONF_RELAXATION_ATTEMPTS_BEST,
|
||||||
CONF_RELAXATION_ATTEMPTS_PEAK,
|
CONF_RELAXATION_ATTEMPTS_PEAK,
|
||||||
CONF_VIRTUAL_TIME_OFFSET_DAYS,
|
CONF_VIRTUAL_TIME_OFFSET_DAYS,
|
||||||
|
|
@ -56,10 +61,15 @@ from custom_components.tibber_prices.const import (
|
||||||
DEFAULT_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
DEFAULT_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||||
DEFAULT_PEAK_PRICE_MIN_LEVEL,
|
DEFAULT_PEAK_PRICE_MIN_LEVEL,
|
||||||
DEFAULT_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
DEFAULT_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
||||||
|
DEFAULT_PRICE_LEVEL_GAP_TOLERANCE,
|
||||||
|
DEFAULT_PRICE_RATING_GAP_TOLERANCE,
|
||||||
|
DEFAULT_PRICE_RATING_HYSTERESIS,
|
||||||
DEFAULT_PRICE_RATING_THRESHOLD_HIGH,
|
DEFAULT_PRICE_RATING_THRESHOLD_HIGH,
|
||||||
DEFAULT_PRICE_RATING_THRESHOLD_LOW,
|
DEFAULT_PRICE_RATING_THRESHOLD_LOW,
|
||||||
DEFAULT_PRICE_TREND_THRESHOLD_FALLING,
|
DEFAULT_PRICE_TREND_THRESHOLD_FALLING,
|
||||||
DEFAULT_PRICE_TREND_THRESHOLD_RISING,
|
DEFAULT_PRICE_TREND_THRESHOLD_RISING,
|
||||||
|
DEFAULT_PRICE_TREND_THRESHOLD_STRONGLY_FALLING,
|
||||||
|
DEFAULT_PRICE_TREND_THRESHOLD_STRONGLY_RISING,
|
||||||
DEFAULT_RELAXATION_ATTEMPTS_BEST,
|
DEFAULT_RELAXATION_ATTEMPTS_BEST,
|
||||||
DEFAULT_RELAXATION_ATTEMPTS_PEAK,
|
DEFAULT_RELAXATION_ATTEMPTS_PEAK,
|
||||||
DEFAULT_VIRTUAL_TIME_OFFSET_DAYS,
|
DEFAULT_VIRTUAL_TIME_OFFSET_DAYS,
|
||||||
|
|
@ -73,20 +83,30 @@ from custom_components.tibber_prices.const import (
|
||||||
MAX_GAP_COUNT,
|
MAX_GAP_COUNT,
|
||||||
MAX_MIN_PERIOD_LENGTH,
|
MAX_MIN_PERIOD_LENGTH,
|
||||||
MAX_MIN_PERIODS,
|
MAX_MIN_PERIODS,
|
||||||
|
MAX_PRICE_LEVEL_GAP_TOLERANCE,
|
||||||
|
MAX_PRICE_RATING_GAP_TOLERANCE,
|
||||||
|
MAX_PRICE_RATING_HYSTERESIS,
|
||||||
MAX_PRICE_RATING_THRESHOLD_HIGH,
|
MAX_PRICE_RATING_THRESHOLD_HIGH,
|
||||||
MAX_PRICE_RATING_THRESHOLD_LOW,
|
MAX_PRICE_RATING_THRESHOLD_LOW,
|
||||||
MAX_PRICE_TREND_FALLING,
|
MAX_PRICE_TREND_FALLING,
|
||||||
MAX_PRICE_TREND_RISING,
|
MAX_PRICE_TREND_RISING,
|
||||||
|
MAX_PRICE_TREND_STRONGLY_FALLING,
|
||||||
|
MAX_PRICE_TREND_STRONGLY_RISING,
|
||||||
MAX_RELAXATION_ATTEMPTS,
|
MAX_RELAXATION_ATTEMPTS,
|
||||||
MAX_VOLATILITY_THRESHOLD_HIGH,
|
MAX_VOLATILITY_THRESHOLD_HIGH,
|
||||||
MAX_VOLATILITY_THRESHOLD_MODERATE,
|
MAX_VOLATILITY_THRESHOLD_MODERATE,
|
||||||
MAX_VOLATILITY_THRESHOLD_VERY_HIGH,
|
MAX_VOLATILITY_THRESHOLD_VERY_HIGH,
|
||||||
MIN_GAP_COUNT,
|
MIN_GAP_COUNT,
|
||||||
MIN_PERIOD_LENGTH,
|
MIN_PERIOD_LENGTH,
|
||||||
|
MIN_PRICE_LEVEL_GAP_TOLERANCE,
|
||||||
|
MIN_PRICE_RATING_GAP_TOLERANCE,
|
||||||
|
MIN_PRICE_RATING_HYSTERESIS,
|
||||||
MIN_PRICE_RATING_THRESHOLD_HIGH,
|
MIN_PRICE_RATING_THRESHOLD_HIGH,
|
||||||
MIN_PRICE_RATING_THRESHOLD_LOW,
|
MIN_PRICE_RATING_THRESHOLD_LOW,
|
||||||
MIN_PRICE_TREND_FALLING,
|
MIN_PRICE_TREND_FALLING,
|
||||||
MIN_PRICE_TREND_RISING,
|
MIN_PRICE_TREND_RISING,
|
||||||
|
MIN_PRICE_TREND_STRONGLY_FALLING,
|
||||||
|
MIN_PRICE_TREND_STRONGLY_RISING,
|
||||||
MIN_RELAXATION_ATTEMPTS,
|
MIN_RELAXATION_ATTEMPTS,
|
||||||
MIN_VOLATILITY_THRESHOLD_HIGH,
|
MIN_VOLATILITY_THRESHOLD_HIGH,
|
||||||
MIN_VOLATILITY_THRESHOLD_MODERATE,
|
MIN_VOLATILITY_THRESHOLD_MODERATE,
|
||||||
|
|
@ -99,6 +119,8 @@ from homeassistant.data_entry_flow import section
|
||||||
from homeassistant.helpers import selector
|
from homeassistant.helpers import selector
|
||||||
from homeassistant.helpers.selector import (
|
from homeassistant.helpers.selector import (
|
||||||
BooleanSelector,
|
BooleanSelector,
|
||||||
|
ConstantSelector,
|
||||||
|
ConstantSelectorConfig,
|
||||||
NumberSelector,
|
NumberSelector,
|
||||||
NumberSelectorConfig,
|
NumberSelectorConfig,
|
||||||
NumberSelectorMode,
|
NumberSelectorMode,
|
||||||
|
|
@ -111,6 +133,155 @@ from homeassistant.helpers.selector import (
|
||||||
TextSelectorType,
|
TextSelectorType,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Type alias for config override structure: {section: {config_key: value}}
|
||||||
|
ConfigOverrides = dict[str, dict[str, Any]]
|
||||||
|
|
||||||
|
|
||||||
|
def is_field_overridden(
|
||||||
|
config_key: str,
|
||||||
|
config_section: str, # noqa: ARG001 - kept for API compatibility
|
||||||
|
overrides: ConfigOverrides | None,
|
||||||
|
) -> bool:
|
||||||
|
"""
|
||||||
|
Check if a config field has an active runtime override.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
config_key: The configuration key to check (e.g., "best_price_flex")
|
||||||
|
config_section: Unused, kept for API compatibility
|
||||||
|
overrides: Dictionary of active overrides (with "_enabled" key)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if this field is being overridden by a config entity, False otherwise
|
||||||
|
|
||||||
|
"""
|
||||||
|
if overrides is None:
|
||||||
|
return False
|
||||||
|
# Check if key is in the _enabled section (from entity registry check)
|
||||||
|
return config_key in overrides.get("_enabled", {})
|
||||||
|
|
||||||
|
|
||||||
|
# Override translations structure from common section
|
||||||
|
# This will be loaded at runtime and passed to schema functions
|
||||||
|
OverrideTranslations = dict[str, Any] # Type alias
|
||||||
|
|
||||||
|
# Fallback labels when translations not available
|
||||||
|
# Used only as fallback - translations should be loaded from common.override_field_labels
|
||||||
|
DEFAULT_FIELD_LABELS: dict[str, str] = {
|
||||||
|
# Best Price
|
||||||
|
CONF_BEST_PRICE_MIN_PERIOD_LENGTH: "Minimum Period Length",
|
||||||
|
CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT: "Gap Tolerance",
|
||||||
|
CONF_BEST_PRICE_FLEX: "Flexibility",
|
||||||
|
CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG: "Minimum Distance",
|
||||||
|
CONF_ENABLE_MIN_PERIODS_BEST: "Achieve Minimum Count",
|
||||||
|
CONF_MIN_PERIODS_BEST: "Minimum Periods",
|
||||||
|
CONF_RELAXATION_ATTEMPTS_BEST: "Relaxation Attempts",
|
||||||
|
# Peak Price
|
||||||
|
CONF_PEAK_PRICE_MIN_PERIOD_LENGTH: "Minimum Period Length",
|
||||||
|
CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT: "Gap Tolerance",
|
||||||
|
CONF_PEAK_PRICE_FLEX: "Flexibility",
|
||||||
|
CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG: "Minimum Distance",
|
||||||
|
CONF_ENABLE_MIN_PERIODS_PEAK: "Achieve Minimum Count",
|
||||||
|
CONF_MIN_PERIODS_PEAK: "Minimum Periods",
|
||||||
|
CONF_RELAXATION_ATTEMPTS_PEAK: "Relaxation Attempts",
|
||||||
|
}
|
||||||
|
|
||||||
|
# Section to config keys mapping for override detection
|
||||||
|
SECTION_CONFIG_KEYS: dict[str, dict[str, list[str]]] = {
|
||||||
|
"best_price": {
|
||||||
|
"period_settings": [
|
||||||
|
CONF_BEST_PRICE_MIN_PERIOD_LENGTH,
|
||||||
|
CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||||
|
],
|
||||||
|
"flexibility_settings": [
|
||||||
|
CONF_BEST_PRICE_FLEX,
|
||||||
|
CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||||
|
],
|
||||||
|
"relaxation_and_target_periods": [
|
||||||
|
CONF_ENABLE_MIN_PERIODS_BEST,
|
||||||
|
CONF_MIN_PERIODS_BEST,
|
||||||
|
CONF_RELAXATION_ATTEMPTS_BEST,
|
||||||
|
],
|
||||||
|
},
|
||||||
|
"peak_price": {
|
||||||
|
"period_settings": [
|
||||||
|
CONF_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
||||||
|
CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||||
|
],
|
||||||
|
"flexibility_settings": [
|
||||||
|
CONF_PEAK_PRICE_FLEX,
|
||||||
|
CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||||
|
],
|
||||||
|
"relaxation_and_target_periods": [
|
||||||
|
CONF_ENABLE_MIN_PERIODS_PEAK,
|
||||||
|
CONF_MIN_PERIODS_PEAK,
|
||||||
|
CONF_RELAXATION_ATTEMPTS_PEAK,
|
||||||
|
],
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def get_section_override_warning(
|
||||||
|
step_id: str,
|
||||||
|
section_id: str,
|
||||||
|
overrides: ConfigOverrides | None,
|
||||||
|
translations: OverrideTranslations | None = None,
|
||||||
|
) -> dict[vol.Optional, ConstantSelector] | None:
|
||||||
|
"""
|
||||||
|
Return a warning constant selector if any fields in the section are overridden.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
step_id: The step ID (best_price or peak_price)
|
||||||
|
section_id: The section ID within the step
|
||||||
|
overrides: Active runtime overrides from coordinator
|
||||||
|
translations: Override translations from common section (optional)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict with override warning selector if any fields overridden, None otherwise
|
||||||
|
|
||||||
|
"""
|
||||||
|
if not overrides:
|
||||||
|
return None
|
||||||
|
|
||||||
|
section_keys = SECTION_CONFIG_KEYS.get(step_id, {}).get(section_id, [])
|
||||||
|
overridden_fields = []
|
||||||
|
|
||||||
|
for config_key in section_keys:
|
||||||
|
if is_field_overridden(config_key, section_id, overrides):
|
||||||
|
# Try to get translated label from flat keys, fallback to DEFAULT_FIELD_LABELS
|
||||||
|
translation_key = f"override_field_label_{config_key}"
|
||||||
|
label = (translations.get(translation_key) if translations else None) or DEFAULT_FIELD_LABELS.get(
|
||||||
|
config_key, config_key
|
||||||
|
)
|
||||||
|
overridden_fields.append(label)
|
||||||
|
|
||||||
|
if not overridden_fields:
|
||||||
|
return None
|
||||||
|
|
||||||
|
# Get translated "and" connector or use fallback
|
||||||
|
and_connector = " and "
|
||||||
|
if translations and "override_warning_and" in translations:
|
||||||
|
and_connector = f" {translations['override_warning_and']} "
|
||||||
|
|
||||||
|
# Build warning message with list of overridden fields
|
||||||
|
if len(overridden_fields) == 1:
|
||||||
|
fields_text = overridden_fields[0]
|
||||||
|
else:
|
||||||
|
fields_text = ", ".join(overridden_fields[:-1]) + and_connector + overridden_fields[-1]
|
||||||
|
|
||||||
|
# Get translated warning template or use fallback
|
||||||
|
warning_template = "⚠️ {fields} controlled by config entity"
|
||||||
|
if translations and "override_warning_template" in translations:
|
||||||
|
warning_template = translations["override_warning_template"]
|
||||||
|
|
||||||
|
return {
|
||||||
|
vol.Optional("_override_warning"): ConstantSelector(
|
||||||
|
ConstantSelectorConfig(
|
||||||
|
value=True,
|
||||||
|
label=warning_template.format(fields=fields_text),
|
||||||
|
)
|
||||||
|
),
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
def get_user_schema(access_token: str | None = None) -> vol.Schema:
|
def get_user_schema(access_token: str | None = None) -> vol.Schema:
|
||||||
"""Return schema for user step (API token input)."""
|
"""Return schema for user step (API token input)."""
|
||||||
|
|
@ -257,7 +428,7 @@ def get_display_settings_schema(options: Mapping[str, Any], currency_code: str |
|
||||||
|
|
||||||
|
|
||||||
def get_price_rating_schema(options: Mapping[str, Any]) -> vol.Schema:
|
def get_price_rating_schema(options: Mapping[str, Any]) -> vol.Schema:
|
||||||
"""Return schema for price rating thresholds configuration."""
|
"""Return schema for price rating configuration (thresholds and stabilization)."""
|
||||||
return vol.Schema(
|
return vol.Schema(
|
||||||
{
|
{
|
||||||
vol.Optional(
|
vol.Optional(
|
||||||
|
|
@ -294,6 +465,63 @@ def get_price_rating_schema(options: Mapping[str, Any]) -> vol.Schema:
|
||||||
mode=NumberSelectorMode.SLIDER,
|
mode=NumberSelectorMode.SLIDER,
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
|
vol.Optional(
|
||||||
|
CONF_PRICE_RATING_HYSTERESIS,
|
||||||
|
default=float(
|
||||||
|
options.get(
|
||||||
|
CONF_PRICE_RATING_HYSTERESIS,
|
||||||
|
DEFAULT_PRICE_RATING_HYSTERESIS,
|
||||||
|
)
|
||||||
|
),
|
||||||
|
): NumberSelector(
|
||||||
|
NumberSelectorConfig(
|
||||||
|
min=MIN_PRICE_RATING_HYSTERESIS,
|
||||||
|
max=MAX_PRICE_RATING_HYSTERESIS,
|
||||||
|
unit_of_measurement="%",
|
||||||
|
step=0.5,
|
||||||
|
mode=NumberSelectorMode.SLIDER,
|
||||||
|
),
|
||||||
|
),
|
||||||
|
vol.Optional(
|
||||||
|
CONF_PRICE_RATING_GAP_TOLERANCE,
|
||||||
|
default=int(
|
||||||
|
options.get(
|
||||||
|
CONF_PRICE_RATING_GAP_TOLERANCE,
|
||||||
|
DEFAULT_PRICE_RATING_GAP_TOLERANCE,
|
||||||
|
)
|
||||||
|
),
|
||||||
|
): NumberSelector(
|
||||||
|
NumberSelectorConfig(
|
||||||
|
min=MIN_PRICE_RATING_GAP_TOLERANCE,
|
||||||
|
max=MAX_PRICE_RATING_GAP_TOLERANCE,
|
||||||
|
step=1,
|
||||||
|
mode=NumberSelectorMode.SLIDER,
|
||||||
|
),
|
||||||
|
),
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def get_price_level_schema(options: Mapping[str, Any]) -> vol.Schema:
|
||||||
|
"""Return schema for Tibber price level stabilization (gap tolerance for API level field)."""
|
||||||
|
return vol.Schema(
|
||||||
|
{
|
||||||
|
vol.Optional(
|
||||||
|
CONF_PRICE_LEVEL_GAP_TOLERANCE,
|
||||||
|
default=int(
|
||||||
|
options.get(
|
||||||
|
CONF_PRICE_LEVEL_GAP_TOLERANCE,
|
||||||
|
DEFAULT_PRICE_LEVEL_GAP_TOLERANCE,
|
||||||
|
)
|
||||||
|
),
|
||||||
|
): NumberSelector(
|
||||||
|
NumberSelectorConfig(
|
||||||
|
min=MIN_PRICE_LEVEL_GAP_TOLERANCE,
|
||||||
|
max=MAX_PRICE_LEVEL_GAP_TOLERANCE,
|
||||||
|
step=1,
|
||||||
|
mode=NumberSelectorMode.SLIDER,
|
||||||
|
),
|
||||||
|
),
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -357,298 +585,322 @@ def get_volatility_schema(options: Mapping[str, Any]) -> vol.Schema:
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
def get_best_price_schema(options: Mapping[str, Any]) -> vol.Schema:
|
def get_best_price_schema(
|
||||||
"""Return schema for best price period configuration with collapsible sections."""
|
options: Mapping[str, Any],
|
||||||
|
overrides: ConfigOverrides | None = None,
|
||||||
|
translations: OverrideTranslations | None = None,
|
||||||
|
) -> vol.Schema:
|
||||||
|
"""
|
||||||
|
Return schema for best price period configuration with collapsible sections.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
options: Current options from config entry
|
||||||
|
overrides: Active runtime overrides from coordinator. Fields with active
|
||||||
|
overrides will be replaced with a constant placeholder.
|
||||||
|
translations: Override translations from common section (optional)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Voluptuous schema for the best price configuration form
|
||||||
|
|
||||||
|
"""
|
||||||
period_settings = options.get("period_settings", {})
|
period_settings = options.get("period_settings", {})
|
||||||
|
flexibility_settings = options.get("flexibility_settings", {})
|
||||||
|
relaxation_settings = options.get("relaxation_and_target_periods", {})
|
||||||
|
|
||||||
|
# Get current values for override display
|
||||||
|
min_period_length = int(
|
||||||
|
period_settings.get(CONF_BEST_PRICE_MIN_PERIOD_LENGTH, DEFAULT_BEST_PRICE_MIN_PERIOD_LENGTH)
|
||||||
|
)
|
||||||
|
max_level_gap_count = int(
|
||||||
|
period_settings.get(CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT, DEFAULT_BEST_PRICE_MAX_LEVEL_GAP_COUNT)
|
||||||
|
)
|
||||||
|
best_price_flex = int(flexibility_settings.get(CONF_BEST_PRICE_FLEX, DEFAULT_BEST_PRICE_FLEX))
|
||||||
|
min_distance = int(
|
||||||
|
flexibility_settings.get(CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG, DEFAULT_BEST_PRICE_MIN_DISTANCE_FROM_AVG)
|
||||||
|
)
|
||||||
|
enable_min_periods = relaxation_settings.get(CONF_ENABLE_MIN_PERIODS_BEST, DEFAULT_ENABLE_MIN_PERIODS_BEST)
|
||||||
|
min_periods = int(relaxation_settings.get(CONF_MIN_PERIODS_BEST, DEFAULT_MIN_PERIODS_BEST))
|
||||||
|
relaxation_attempts = int(relaxation_settings.get(CONF_RELAXATION_ATTEMPTS_BEST, DEFAULT_RELAXATION_ATTEMPTS_BEST))
|
||||||
|
|
||||||
|
# Build section schemas with optional override warnings
|
||||||
|
period_warning = get_section_override_warning("best_price", "period_settings", overrides, translations) or {}
|
||||||
|
period_fields: dict[vol.Optional | vol.Required, Any] = {
|
||||||
|
**period_warning, # type: ignore[misc]
|
||||||
|
vol.Optional(
|
||||||
|
CONF_BEST_PRICE_MIN_PERIOD_LENGTH,
|
||||||
|
default=min_period_length,
|
||||||
|
): NumberSelector(
|
||||||
|
NumberSelectorConfig(
|
||||||
|
min=MIN_PERIOD_LENGTH,
|
||||||
|
max=MAX_MIN_PERIOD_LENGTH,
|
||||||
|
step=15,
|
||||||
|
unit_of_measurement="min",
|
||||||
|
mode=NumberSelectorMode.SLIDER,
|
||||||
|
)
|
||||||
|
),
|
||||||
|
vol.Optional(
|
||||||
|
CONF_BEST_PRICE_MAX_LEVEL,
|
||||||
|
default=period_settings.get(
|
||||||
|
CONF_BEST_PRICE_MAX_LEVEL,
|
||||||
|
DEFAULT_BEST_PRICE_MAX_LEVEL,
|
||||||
|
),
|
||||||
|
): SelectSelector(
|
||||||
|
SelectSelectorConfig(
|
||||||
|
options=BEST_PRICE_MAX_LEVEL_OPTIONS,
|
||||||
|
mode=SelectSelectorMode.DROPDOWN,
|
||||||
|
translation_key="current_interval_price_level",
|
||||||
|
),
|
||||||
|
),
|
||||||
|
vol.Optional(
|
||||||
|
CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||||
|
default=max_level_gap_count,
|
||||||
|
): NumberSelector(
|
||||||
|
NumberSelectorConfig(
|
||||||
|
min=MIN_GAP_COUNT,
|
||||||
|
max=MAX_GAP_COUNT,
|
||||||
|
step=1,
|
||||||
|
mode=NumberSelectorMode.SLIDER,
|
||||||
|
)
|
||||||
|
),
|
||||||
|
}
|
||||||
|
|
||||||
|
flexibility_warning = (
|
||||||
|
get_section_override_warning("best_price", "flexibility_settings", overrides, translations) or {}
|
||||||
|
)
|
||||||
|
flexibility_fields: dict[vol.Optional | vol.Required, Any] = {
|
||||||
|
**flexibility_warning, # type: ignore[misc]
|
||||||
|
vol.Optional(
|
||||||
|
CONF_BEST_PRICE_FLEX,
|
||||||
|
default=best_price_flex,
|
||||||
|
): NumberSelector(
|
||||||
|
NumberSelectorConfig(
|
||||||
|
min=0,
|
||||||
|
max=50,
|
||||||
|
step=1,
|
||||||
|
unit_of_measurement="%",
|
||||||
|
mode=NumberSelectorMode.SLIDER,
|
||||||
|
)
|
||||||
|
),
|
||||||
|
vol.Optional(
|
||||||
|
CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||||
|
default=min_distance,
|
||||||
|
): NumberSelector(
|
||||||
|
NumberSelectorConfig(
|
||||||
|
min=-50,
|
||||||
|
max=0,
|
||||||
|
step=1,
|
||||||
|
unit_of_measurement="%",
|
||||||
|
mode=NumberSelectorMode.SLIDER,
|
||||||
|
)
|
||||||
|
),
|
||||||
|
}
|
||||||
|
|
||||||
|
relaxation_warning = (
|
||||||
|
get_section_override_warning("best_price", "relaxation_and_target_periods", overrides, translations) or {}
|
||||||
|
)
|
||||||
|
relaxation_fields: dict[vol.Optional | vol.Required, Any] = {
|
||||||
|
**relaxation_warning, # type: ignore[misc]
|
||||||
|
vol.Optional(
|
||||||
|
CONF_ENABLE_MIN_PERIODS_BEST,
|
||||||
|
default=enable_min_periods,
|
||||||
|
): BooleanSelector(selector.BooleanSelectorConfig()),
|
||||||
|
vol.Optional(
|
||||||
|
CONF_MIN_PERIODS_BEST,
|
||||||
|
default=min_periods,
|
||||||
|
): NumberSelector(
|
||||||
|
NumberSelectorConfig(
|
||||||
|
min=1,
|
||||||
|
max=MAX_MIN_PERIODS,
|
||||||
|
step=1,
|
||||||
|
mode=NumberSelectorMode.SLIDER,
|
||||||
|
)
|
||||||
|
),
|
||||||
|
vol.Optional(
|
||||||
|
CONF_RELAXATION_ATTEMPTS_BEST,
|
||||||
|
default=relaxation_attempts,
|
||||||
|
): NumberSelector(
|
||||||
|
NumberSelectorConfig(
|
||||||
|
min=MIN_RELAXATION_ATTEMPTS,
|
||||||
|
max=MAX_RELAXATION_ATTEMPTS,
|
||||||
|
step=1,
|
||||||
|
mode=NumberSelectorMode.SLIDER,
|
||||||
|
)
|
||||||
|
),
|
||||||
|
}
|
||||||
|
|
||||||
return vol.Schema(
|
return vol.Schema(
|
||||||
{
|
{
|
||||||
vol.Required("period_settings"): section(
|
vol.Required("period_settings"): section(
|
||||||
vol.Schema(
|
vol.Schema(period_fields),
|
||||||
{
|
|
||||||
vol.Optional(
|
|
||||||
CONF_BEST_PRICE_MIN_PERIOD_LENGTH,
|
|
||||||
default=int(
|
|
||||||
period_settings.get(
|
|
||||||
CONF_BEST_PRICE_MIN_PERIOD_LENGTH,
|
|
||||||
DEFAULT_BEST_PRICE_MIN_PERIOD_LENGTH,
|
|
||||||
)
|
|
||||||
),
|
|
||||||
): NumberSelector(
|
|
||||||
NumberSelectorConfig(
|
|
||||||
min=MIN_PERIOD_LENGTH,
|
|
||||||
max=MAX_MIN_PERIOD_LENGTH,
|
|
||||||
step=15,
|
|
||||||
unit_of_measurement="min",
|
|
||||||
mode=NumberSelectorMode.SLIDER,
|
|
||||||
),
|
|
||||||
),
|
|
||||||
vol.Optional(
|
|
||||||
CONF_BEST_PRICE_MAX_LEVEL,
|
|
||||||
default=period_settings.get(
|
|
||||||
CONF_BEST_PRICE_MAX_LEVEL,
|
|
||||||
DEFAULT_BEST_PRICE_MAX_LEVEL,
|
|
||||||
),
|
|
||||||
): SelectSelector(
|
|
||||||
SelectSelectorConfig(
|
|
||||||
options=BEST_PRICE_MAX_LEVEL_OPTIONS,
|
|
||||||
mode=SelectSelectorMode.DROPDOWN,
|
|
||||||
translation_key="current_interval_price_level",
|
|
||||||
),
|
|
||||||
),
|
|
||||||
vol.Optional(
|
|
||||||
CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
|
||||||
default=int(
|
|
||||||
period_settings.get(
|
|
||||||
CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
|
||||||
DEFAULT_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
|
||||||
)
|
|
||||||
),
|
|
||||||
): NumberSelector(
|
|
||||||
NumberSelectorConfig(
|
|
||||||
min=MIN_GAP_COUNT,
|
|
||||||
max=MAX_GAP_COUNT,
|
|
||||||
step=1,
|
|
||||||
mode=NumberSelectorMode.SLIDER,
|
|
||||||
),
|
|
||||||
),
|
|
||||||
}
|
|
||||||
),
|
|
||||||
{"collapsed": False},
|
{"collapsed": False},
|
||||||
),
|
),
|
||||||
vol.Required("flexibility_settings"): section(
|
vol.Required("flexibility_settings"): section(
|
||||||
vol.Schema(
|
vol.Schema(flexibility_fields),
|
||||||
{
|
|
||||||
vol.Optional(
|
|
||||||
CONF_BEST_PRICE_FLEX,
|
|
||||||
default=int(
|
|
||||||
options.get("flexibility_settings", {}).get(
|
|
||||||
CONF_BEST_PRICE_FLEX,
|
|
||||||
DEFAULT_BEST_PRICE_FLEX,
|
|
||||||
)
|
|
||||||
),
|
|
||||||
): NumberSelector(
|
|
||||||
NumberSelectorConfig(
|
|
||||||
min=0,
|
|
||||||
max=50,
|
|
||||||
step=1,
|
|
||||||
unit_of_measurement="%",
|
|
||||||
mode=NumberSelectorMode.SLIDER,
|
|
||||||
),
|
|
||||||
),
|
|
||||||
vol.Optional(
|
|
||||||
CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG,
|
|
||||||
default=int(
|
|
||||||
options.get("flexibility_settings", {}).get(
|
|
||||||
CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG,
|
|
||||||
DEFAULT_BEST_PRICE_MIN_DISTANCE_FROM_AVG,
|
|
||||||
)
|
|
||||||
),
|
|
||||||
): NumberSelector(
|
|
||||||
NumberSelectorConfig(
|
|
||||||
min=-50,
|
|
||||||
max=0,
|
|
||||||
step=1,
|
|
||||||
unit_of_measurement="%",
|
|
||||||
mode=NumberSelectorMode.SLIDER,
|
|
||||||
),
|
|
||||||
),
|
|
||||||
}
|
|
||||||
),
|
|
||||||
{"collapsed": True},
|
{"collapsed": True},
|
||||||
),
|
),
|
||||||
vol.Required("relaxation_and_target_periods"): section(
|
vol.Required("relaxation_and_target_periods"): section(
|
||||||
vol.Schema(
|
vol.Schema(relaxation_fields),
|
||||||
{
|
|
||||||
vol.Optional(
|
|
||||||
CONF_ENABLE_MIN_PERIODS_BEST,
|
|
||||||
default=options.get("relaxation_and_target_periods", {}).get(
|
|
||||||
CONF_ENABLE_MIN_PERIODS_BEST,
|
|
||||||
DEFAULT_ENABLE_MIN_PERIODS_BEST,
|
|
||||||
),
|
|
||||||
): BooleanSelector(),
|
|
||||||
vol.Optional(
|
|
||||||
CONF_MIN_PERIODS_BEST,
|
|
||||||
default=int(
|
|
||||||
options.get("relaxation_and_target_periods", {}).get(
|
|
||||||
CONF_MIN_PERIODS_BEST,
|
|
||||||
DEFAULT_MIN_PERIODS_BEST,
|
|
||||||
)
|
|
||||||
),
|
|
||||||
): NumberSelector(
|
|
||||||
NumberSelectorConfig(
|
|
||||||
min=1,
|
|
||||||
max=MAX_MIN_PERIODS,
|
|
||||||
step=1,
|
|
||||||
mode=NumberSelectorMode.SLIDER,
|
|
||||||
),
|
|
||||||
),
|
|
||||||
vol.Optional(
|
|
||||||
CONF_RELAXATION_ATTEMPTS_BEST,
|
|
||||||
default=int(
|
|
||||||
options.get("relaxation_and_target_periods", {}).get(
|
|
||||||
CONF_RELAXATION_ATTEMPTS_BEST,
|
|
||||||
DEFAULT_RELAXATION_ATTEMPTS_BEST,
|
|
||||||
)
|
|
||||||
),
|
|
||||||
): NumberSelector(
|
|
||||||
NumberSelectorConfig(
|
|
||||||
min=MIN_RELAXATION_ATTEMPTS,
|
|
||||||
max=MAX_RELAXATION_ATTEMPTS,
|
|
||||||
step=1,
|
|
||||||
mode=NumberSelectorMode.SLIDER,
|
|
||||||
),
|
|
||||||
),
|
|
||||||
}
|
|
||||||
),
|
|
||||||
{"collapsed": True},
|
{"collapsed": True},
|
||||||
),
|
),
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
def get_peak_price_schema(options: Mapping[str, Any]) -> vol.Schema:
|
def get_peak_price_schema(
|
||||||
"""Return schema for peak price period configuration with collapsible sections."""
|
options: Mapping[str, Any],
|
||||||
|
overrides: ConfigOverrides | None = None,
|
||||||
|
translations: OverrideTranslations | None = None,
|
||||||
|
) -> vol.Schema:
|
||||||
|
"""
|
||||||
|
Return schema for peak price period configuration with collapsible sections.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
options: Current options from config entry
|
||||||
|
overrides: Active runtime overrides from coordinator. Fields with active
|
||||||
|
overrides will be replaced with a constant placeholder.
|
||||||
|
translations: Override translations from common section (optional)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Voluptuous schema for the peak price configuration form
|
||||||
|
|
||||||
|
"""
|
||||||
period_settings = options.get("period_settings", {})
|
period_settings = options.get("period_settings", {})
|
||||||
|
flexibility_settings = options.get("flexibility_settings", {})
|
||||||
|
relaxation_settings = options.get("relaxation_and_target_periods", {})
|
||||||
|
|
||||||
|
# Get current values for override display
|
||||||
|
min_period_length = int(
|
||||||
|
period_settings.get(CONF_PEAK_PRICE_MIN_PERIOD_LENGTH, DEFAULT_PEAK_PRICE_MIN_PERIOD_LENGTH)
|
||||||
|
)
|
||||||
|
max_level_gap_count = int(
|
||||||
|
period_settings.get(CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT, DEFAULT_PEAK_PRICE_MAX_LEVEL_GAP_COUNT)
|
||||||
|
)
|
||||||
|
peak_price_flex = int(flexibility_settings.get(CONF_PEAK_PRICE_FLEX, DEFAULT_PEAK_PRICE_FLEX))
|
||||||
|
min_distance = int(
|
||||||
|
flexibility_settings.get(CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG, DEFAULT_PEAK_PRICE_MIN_DISTANCE_FROM_AVG)
|
||||||
|
)
|
||||||
|
enable_min_periods = relaxation_settings.get(CONF_ENABLE_MIN_PERIODS_PEAK, DEFAULT_ENABLE_MIN_PERIODS_PEAK)
|
||||||
|
min_periods = int(relaxation_settings.get(CONF_MIN_PERIODS_PEAK, DEFAULT_MIN_PERIODS_PEAK))
|
||||||
|
relaxation_attempts = int(relaxation_settings.get(CONF_RELAXATION_ATTEMPTS_PEAK, DEFAULT_RELAXATION_ATTEMPTS_PEAK))
|
||||||
|
|
||||||
|
# Build section schemas with optional override warnings
|
||||||
|
period_warning = get_section_override_warning("peak_price", "period_settings", overrides, translations) or {}
|
||||||
|
period_fields: dict[vol.Optional | vol.Required, Any] = {
|
||||||
|
**period_warning, # type: ignore[misc]
|
||||||
|
vol.Optional(
|
||||||
|
CONF_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
||||||
|
default=min_period_length,
|
||||||
|
): NumberSelector(
|
||||||
|
NumberSelectorConfig(
|
||||||
|
min=MIN_PERIOD_LENGTH,
|
||||||
|
max=MAX_MIN_PERIOD_LENGTH,
|
||||||
|
step=15,
|
||||||
|
unit_of_measurement="min",
|
||||||
|
mode=NumberSelectorMode.SLIDER,
|
||||||
|
)
|
||||||
|
),
|
||||||
|
vol.Optional(
|
||||||
|
CONF_PEAK_PRICE_MIN_LEVEL,
|
||||||
|
default=period_settings.get(
|
||||||
|
CONF_PEAK_PRICE_MIN_LEVEL,
|
||||||
|
DEFAULT_PEAK_PRICE_MIN_LEVEL,
|
||||||
|
),
|
||||||
|
): SelectSelector(
|
||||||
|
SelectSelectorConfig(
|
||||||
|
options=PEAK_PRICE_MIN_LEVEL_OPTIONS,
|
||||||
|
mode=SelectSelectorMode.DROPDOWN,
|
||||||
|
translation_key="current_interval_price_level",
|
||||||
|
),
|
||||||
|
),
|
||||||
|
vol.Optional(
|
||||||
|
CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||||
|
default=max_level_gap_count,
|
||||||
|
): NumberSelector(
|
||||||
|
NumberSelectorConfig(
|
||||||
|
min=MIN_GAP_COUNT,
|
||||||
|
max=MAX_GAP_COUNT,
|
||||||
|
step=1,
|
||||||
|
mode=NumberSelectorMode.SLIDER,
|
||||||
|
)
|
||||||
|
),
|
||||||
|
}
|
||||||
|
|
||||||
|
flexibility_warning = (
|
||||||
|
get_section_override_warning("peak_price", "flexibility_settings", overrides, translations) or {}
|
||||||
|
)
|
||||||
|
flexibility_fields: dict[vol.Optional | vol.Required, Any] = {
|
||||||
|
**flexibility_warning, # type: ignore[misc]
|
||||||
|
vol.Optional(
|
||||||
|
CONF_PEAK_PRICE_FLEX,
|
||||||
|
default=peak_price_flex,
|
||||||
|
): NumberSelector(
|
||||||
|
NumberSelectorConfig(
|
||||||
|
min=-50,
|
||||||
|
max=0,
|
||||||
|
step=1,
|
||||||
|
unit_of_measurement="%",
|
||||||
|
mode=NumberSelectorMode.SLIDER,
|
||||||
|
)
|
||||||
|
),
|
||||||
|
vol.Optional(
|
||||||
|
CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||||
|
default=min_distance,
|
||||||
|
): NumberSelector(
|
||||||
|
NumberSelectorConfig(
|
||||||
|
min=0,
|
||||||
|
max=50,
|
||||||
|
step=1,
|
||||||
|
unit_of_measurement="%",
|
||||||
|
mode=NumberSelectorMode.SLIDER,
|
||||||
|
)
|
||||||
|
),
|
||||||
|
}
|
||||||
|
|
||||||
|
relaxation_warning = (
|
||||||
|
get_section_override_warning("peak_price", "relaxation_and_target_periods", overrides, translations) or {}
|
||||||
|
)
|
||||||
|
relaxation_fields: dict[vol.Optional | vol.Required, Any] = {
|
||||||
|
**relaxation_warning, # type: ignore[misc]
|
||||||
|
vol.Optional(
|
||||||
|
CONF_ENABLE_MIN_PERIODS_PEAK,
|
||||||
|
default=enable_min_periods,
|
||||||
|
): BooleanSelector(selector.BooleanSelectorConfig()),
|
||||||
|
vol.Optional(
|
||||||
|
CONF_MIN_PERIODS_PEAK,
|
||||||
|
default=min_periods,
|
||||||
|
): NumberSelector(
|
||||||
|
NumberSelectorConfig(
|
||||||
|
min=1,
|
||||||
|
max=MAX_MIN_PERIODS,
|
||||||
|
step=1,
|
||||||
|
mode=NumberSelectorMode.SLIDER,
|
||||||
|
)
|
||||||
|
),
|
||||||
|
vol.Optional(
|
||||||
|
CONF_RELAXATION_ATTEMPTS_PEAK,
|
||||||
|
default=relaxation_attempts,
|
||||||
|
): NumberSelector(
|
||||||
|
NumberSelectorConfig(
|
||||||
|
min=MIN_RELAXATION_ATTEMPTS,
|
||||||
|
max=MAX_RELAXATION_ATTEMPTS,
|
||||||
|
step=1,
|
||||||
|
mode=NumberSelectorMode.SLIDER,
|
||||||
|
)
|
||||||
|
),
|
||||||
|
}
|
||||||
|
|
||||||
return vol.Schema(
|
return vol.Schema(
|
||||||
{
|
{
|
||||||
vol.Required("period_settings"): section(
|
vol.Required("period_settings"): section(
|
||||||
vol.Schema(
|
vol.Schema(period_fields),
|
||||||
{
|
|
||||||
vol.Optional(
|
|
||||||
CONF_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
|
||||||
default=int(
|
|
||||||
period_settings.get(
|
|
||||||
CONF_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
|
||||||
DEFAULT_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
|
||||||
)
|
|
||||||
),
|
|
||||||
): NumberSelector(
|
|
||||||
NumberSelectorConfig(
|
|
||||||
min=MIN_PERIOD_LENGTH,
|
|
||||||
max=MAX_MIN_PERIOD_LENGTH,
|
|
||||||
step=15,
|
|
||||||
unit_of_measurement="min",
|
|
||||||
mode=NumberSelectorMode.SLIDER,
|
|
||||||
),
|
|
||||||
),
|
|
||||||
vol.Optional(
|
|
||||||
CONF_PEAK_PRICE_MIN_LEVEL,
|
|
||||||
default=period_settings.get(
|
|
||||||
CONF_PEAK_PRICE_MIN_LEVEL,
|
|
||||||
DEFAULT_PEAK_PRICE_MIN_LEVEL,
|
|
||||||
),
|
|
||||||
): SelectSelector(
|
|
||||||
SelectSelectorConfig(
|
|
||||||
options=PEAK_PRICE_MIN_LEVEL_OPTIONS,
|
|
||||||
mode=SelectSelectorMode.DROPDOWN,
|
|
||||||
translation_key="current_interval_price_level",
|
|
||||||
),
|
|
||||||
),
|
|
||||||
vol.Optional(
|
|
||||||
CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
|
||||||
default=int(
|
|
||||||
period_settings.get(
|
|
||||||
CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
|
||||||
DEFAULT_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
|
||||||
)
|
|
||||||
),
|
|
||||||
): NumberSelector(
|
|
||||||
NumberSelectorConfig(
|
|
||||||
min=MIN_GAP_COUNT,
|
|
||||||
max=MAX_GAP_COUNT,
|
|
||||||
step=1,
|
|
||||||
mode=NumberSelectorMode.SLIDER,
|
|
||||||
),
|
|
||||||
),
|
|
||||||
}
|
|
||||||
),
|
|
||||||
{"collapsed": False},
|
{"collapsed": False},
|
||||||
),
|
),
|
||||||
vol.Required("flexibility_settings"): section(
|
vol.Required("flexibility_settings"): section(
|
||||||
vol.Schema(
|
vol.Schema(flexibility_fields),
|
||||||
{
|
|
||||||
vol.Optional(
|
|
||||||
CONF_PEAK_PRICE_FLEX,
|
|
||||||
default=int(
|
|
||||||
options.get("flexibility_settings", {}).get(
|
|
||||||
CONF_PEAK_PRICE_FLEX,
|
|
||||||
DEFAULT_PEAK_PRICE_FLEX,
|
|
||||||
)
|
|
||||||
),
|
|
||||||
): NumberSelector(
|
|
||||||
NumberSelectorConfig(
|
|
||||||
min=-50,
|
|
||||||
max=0,
|
|
||||||
step=1,
|
|
||||||
unit_of_measurement="%",
|
|
||||||
mode=NumberSelectorMode.SLIDER,
|
|
||||||
),
|
|
||||||
),
|
|
||||||
vol.Optional(
|
|
||||||
CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
|
||||||
default=int(
|
|
||||||
options.get("flexibility_settings", {}).get(
|
|
||||||
CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
|
||||||
DEFAULT_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
|
||||||
)
|
|
||||||
),
|
|
||||||
): NumberSelector(
|
|
||||||
NumberSelectorConfig(
|
|
||||||
min=0,
|
|
||||||
max=50,
|
|
||||||
step=1,
|
|
||||||
unit_of_measurement="%",
|
|
||||||
mode=NumberSelectorMode.SLIDER,
|
|
||||||
),
|
|
||||||
),
|
|
||||||
}
|
|
||||||
),
|
|
||||||
{"collapsed": True},
|
{"collapsed": True},
|
||||||
),
|
),
|
||||||
vol.Required("relaxation_and_target_periods"): section(
|
vol.Required("relaxation_and_target_periods"): section(
|
||||||
vol.Schema(
|
vol.Schema(relaxation_fields),
|
||||||
{
|
|
||||||
vol.Optional(
|
|
||||||
CONF_ENABLE_MIN_PERIODS_PEAK,
|
|
||||||
default=options.get("relaxation_and_target_periods", {}).get(
|
|
||||||
CONF_ENABLE_MIN_PERIODS_PEAK,
|
|
||||||
DEFAULT_ENABLE_MIN_PERIODS_PEAK,
|
|
||||||
),
|
|
||||||
): BooleanSelector(),
|
|
||||||
vol.Optional(
|
|
||||||
CONF_MIN_PERIODS_PEAK,
|
|
||||||
default=int(
|
|
||||||
options.get("relaxation_and_target_periods", {}).get(
|
|
||||||
CONF_MIN_PERIODS_PEAK,
|
|
||||||
DEFAULT_MIN_PERIODS_PEAK,
|
|
||||||
)
|
|
||||||
),
|
|
||||||
): NumberSelector(
|
|
||||||
NumberSelectorConfig(
|
|
||||||
min=1,
|
|
||||||
max=MAX_MIN_PERIODS,
|
|
||||||
step=1,
|
|
||||||
mode=NumberSelectorMode.SLIDER,
|
|
||||||
),
|
|
||||||
),
|
|
||||||
vol.Optional(
|
|
||||||
CONF_RELAXATION_ATTEMPTS_PEAK,
|
|
||||||
default=int(
|
|
||||||
options.get("relaxation_and_target_periods", {}).get(
|
|
||||||
CONF_RELAXATION_ATTEMPTS_PEAK,
|
|
||||||
DEFAULT_RELAXATION_ATTEMPTS_PEAK,
|
|
||||||
)
|
|
||||||
),
|
|
||||||
): NumberSelector(
|
|
||||||
NumberSelectorConfig(
|
|
||||||
min=MIN_RELAXATION_ATTEMPTS,
|
|
||||||
max=MAX_RELAXATION_ATTEMPTS,
|
|
||||||
step=1,
|
|
||||||
mode=NumberSelectorMode.SLIDER,
|
|
||||||
),
|
|
||||||
),
|
|
||||||
}
|
|
||||||
),
|
|
||||||
{"collapsed": True},
|
{"collapsed": True},
|
||||||
),
|
),
|
||||||
}
|
}
|
||||||
|
|
@ -676,6 +928,23 @@ def get_price_trend_schema(options: Mapping[str, Any]) -> vol.Schema:
|
||||||
mode=NumberSelectorMode.SLIDER,
|
mode=NumberSelectorMode.SLIDER,
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
|
vol.Optional(
|
||||||
|
CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING,
|
||||||
|
default=int(
|
||||||
|
options.get(
|
||||||
|
CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING,
|
||||||
|
DEFAULT_PRICE_TREND_THRESHOLD_STRONGLY_RISING,
|
||||||
|
)
|
||||||
|
),
|
||||||
|
): NumberSelector(
|
||||||
|
NumberSelectorConfig(
|
||||||
|
min=MIN_PRICE_TREND_STRONGLY_RISING,
|
||||||
|
max=MAX_PRICE_TREND_STRONGLY_RISING,
|
||||||
|
step=1,
|
||||||
|
unit_of_measurement="%",
|
||||||
|
mode=NumberSelectorMode.SLIDER,
|
||||||
|
),
|
||||||
|
),
|
||||||
vol.Optional(
|
vol.Optional(
|
||||||
CONF_PRICE_TREND_THRESHOLD_FALLING,
|
CONF_PRICE_TREND_THRESHOLD_FALLING,
|
||||||
default=int(
|
default=int(
|
||||||
|
|
@ -693,6 +962,23 @@ def get_price_trend_schema(options: Mapping[str, Any]) -> vol.Schema:
|
||||||
mode=NumberSelectorMode.SLIDER,
|
mode=NumberSelectorMode.SLIDER,
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
|
vol.Optional(
|
||||||
|
CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING,
|
||||||
|
default=int(
|
||||||
|
options.get(
|
||||||
|
CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING,
|
||||||
|
DEFAULT_PRICE_TREND_THRESHOLD_STRONGLY_FALLING,
|
||||||
|
)
|
||||||
|
),
|
||||||
|
): NumberSelector(
|
||||||
|
NumberSelectorConfig(
|
||||||
|
min=MIN_PRICE_TREND_STRONGLY_FALLING,
|
||||||
|
max=MAX_PRICE_TREND_STRONGLY_FALLING,
|
||||||
|
step=1,
|
||||||
|
unit_of_measurement="%",
|
||||||
|
mode=NumberSelectorMode.SLIDER,
|
||||||
|
),
|
||||||
|
),
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -141,6 +141,7 @@ class TibberPricesConfigFlowHandler(ConfigFlow, domain=DOMAIN):
|
||||||
step_id="reauth_confirm",
|
step_id="reauth_confirm",
|
||||||
data_schema=get_reauth_confirm_schema(),
|
data_schema=get_reauth_confirm_schema(),
|
||||||
errors=_errors,
|
errors=_errors,
|
||||||
|
description_placeholders={"tibber_url": "https://developer.tibber.com"},
|
||||||
)
|
)
|
||||||
|
|
||||||
async def async_step_user(
|
async def async_step_user(
|
||||||
|
|
@ -291,6 +292,7 @@ class TibberPricesConfigFlowHandler(ConfigFlow, domain=DOMAIN):
|
||||||
step_id="new_token",
|
step_id="new_token",
|
||||||
data_schema=get_user_schema((user_input or {}).get(CONF_ACCESS_TOKEN)),
|
data_schema=get_user_schema((user_input or {}).get(CONF_ACCESS_TOKEN)),
|
||||||
errors=_errors,
|
errors=_errors,
|
||||||
|
description_placeholders={"tibber_url": "https://developer.tibber.com"},
|
||||||
)
|
)
|
||||||
|
|
||||||
async def async_step_select_home(self, user_input: dict | None = None) -> ConfigFlowResult: # noqa: PLR0911
|
async def async_step_select_home(self, user_input: dict | None = None) -> ConfigFlowResult: # noqa: PLR0911
|
||||||
|
|
|
||||||
|
|
@ -20,6 +20,8 @@ from custom_components.tibber_prices.const import (
|
||||||
MAX_PRICE_RATING_THRESHOLD_LOW,
|
MAX_PRICE_RATING_THRESHOLD_LOW,
|
||||||
MAX_PRICE_TREND_FALLING,
|
MAX_PRICE_TREND_FALLING,
|
||||||
MAX_PRICE_TREND_RISING,
|
MAX_PRICE_TREND_RISING,
|
||||||
|
MAX_PRICE_TREND_STRONGLY_FALLING,
|
||||||
|
MAX_PRICE_TREND_STRONGLY_RISING,
|
||||||
MAX_RELAXATION_ATTEMPTS,
|
MAX_RELAXATION_ATTEMPTS,
|
||||||
MAX_VOLATILITY_THRESHOLD_HIGH,
|
MAX_VOLATILITY_THRESHOLD_HIGH,
|
||||||
MAX_VOLATILITY_THRESHOLD_MODERATE,
|
MAX_VOLATILITY_THRESHOLD_MODERATE,
|
||||||
|
|
@ -30,6 +32,8 @@ from custom_components.tibber_prices.const import (
|
||||||
MIN_PRICE_RATING_THRESHOLD_LOW,
|
MIN_PRICE_RATING_THRESHOLD_LOW,
|
||||||
MIN_PRICE_TREND_FALLING,
|
MIN_PRICE_TREND_FALLING,
|
||||||
MIN_PRICE_TREND_RISING,
|
MIN_PRICE_TREND_RISING,
|
||||||
|
MIN_PRICE_TREND_STRONGLY_FALLING,
|
||||||
|
MIN_PRICE_TREND_STRONGLY_RISING,
|
||||||
MIN_RELAXATION_ATTEMPTS,
|
MIN_RELAXATION_ATTEMPTS,
|
||||||
MIN_VOLATILITY_THRESHOLD_HIGH,
|
MIN_VOLATILITY_THRESHOLD_HIGH,
|
||||||
MIN_VOLATILITY_THRESHOLD_MODERATE,
|
MIN_VOLATILITY_THRESHOLD_MODERATE,
|
||||||
|
|
@ -337,3 +341,31 @@ def validate_price_trend_falling(threshold: int) -> bool:
|
||||||
|
|
||||||
"""
|
"""
|
||||||
return MIN_PRICE_TREND_FALLING <= threshold <= MAX_PRICE_TREND_FALLING
|
return MIN_PRICE_TREND_FALLING <= threshold <= MAX_PRICE_TREND_FALLING
|
||||||
|
|
||||||
|
|
||||||
|
def validate_price_trend_strongly_rising(threshold: int) -> bool:
|
||||||
|
"""
|
||||||
|
Validate strongly rising price trend threshold.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
threshold: Strongly rising trend threshold percentage (2 to 100)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if threshold is valid (MIN_PRICE_TREND_STRONGLY_RISING to MAX_PRICE_TREND_STRONGLY_RISING)
|
||||||
|
|
||||||
|
"""
|
||||||
|
return MIN_PRICE_TREND_STRONGLY_RISING <= threshold <= MAX_PRICE_TREND_STRONGLY_RISING
|
||||||
|
|
||||||
|
|
||||||
|
def validate_price_trend_strongly_falling(threshold: int) -> bool:
|
||||||
|
"""
|
||||||
|
Validate strongly falling price trend threshold.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
threshold: Strongly falling trend threshold percentage (-100 to -2)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if threshold is valid (MIN_PRICE_TREND_STRONGLY_FALLING to MAX_PRICE_TREND_STRONGLY_FALLING)
|
||||||
|
|
||||||
|
"""
|
||||||
|
return MIN_PRICE_TREND_STRONGLY_FALLING <= threshold <= MAX_PRICE_TREND_STRONGLY_FALLING
|
||||||
|
|
|
||||||
|
|
@ -44,9 +44,14 @@ CONF_BEST_PRICE_MIN_PERIOD_LENGTH = "best_price_min_period_length"
|
||||||
CONF_PEAK_PRICE_MIN_PERIOD_LENGTH = "peak_price_min_period_length"
|
CONF_PEAK_PRICE_MIN_PERIOD_LENGTH = "peak_price_min_period_length"
|
||||||
CONF_PRICE_RATING_THRESHOLD_LOW = "price_rating_threshold_low"
|
CONF_PRICE_RATING_THRESHOLD_LOW = "price_rating_threshold_low"
|
||||||
CONF_PRICE_RATING_THRESHOLD_HIGH = "price_rating_threshold_high"
|
CONF_PRICE_RATING_THRESHOLD_HIGH = "price_rating_threshold_high"
|
||||||
|
CONF_PRICE_RATING_HYSTERESIS = "price_rating_hysteresis"
|
||||||
|
CONF_PRICE_RATING_GAP_TOLERANCE = "price_rating_gap_tolerance"
|
||||||
|
CONF_PRICE_LEVEL_GAP_TOLERANCE = "price_level_gap_tolerance"
|
||||||
CONF_AVERAGE_SENSOR_DISPLAY = "average_sensor_display" # "median" or "mean"
|
CONF_AVERAGE_SENSOR_DISPLAY = "average_sensor_display" # "median" or "mean"
|
||||||
CONF_PRICE_TREND_THRESHOLD_RISING = "price_trend_threshold_rising"
|
CONF_PRICE_TREND_THRESHOLD_RISING = "price_trend_threshold_rising"
|
||||||
CONF_PRICE_TREND_THRESHOLD_FALLING = "price_trend_threshold_falling"
|
CONF_PRICE_TREND_THRESHOLD_FALLING = "price_trend_threshold_falling"
|
||||||
|
CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING = "price_trend_threshold_strongly_rising"
|
||||||
|
CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING = "price_trend_threshold_strongly_falling"
|
||||||
CONF_VOLATILITY_THRESHOLD_MODERATE = "volatility_threshold_moderate"
|
CONF_VOLATILITY_THRESHOLD_MODERATE = "volatility_threshold_moderate"
|
||||||
CONF_VOLATILITY_THRESHOLD_HIGH = "volatility_threshold_high"
|
CONF_VOLATILITY_THRESHOLD_HIGH = "volatility_threshold_high"
|
||||||
CONF_VOLATILITY_THRESHOLD_VERY_HIGH = "volatility_threshold_very_high"
|
CONF_VOLATILITY_THRESHOLD_VERY_HIGH = "volatility_threshold_very_high"
|
||||||
|
|
@ -92,9 +97,16 @@ DEFAULT_BEST_PRICE_MIN_PERIOD_LENGTH = 60 # 60 minutes minimum period length fo
|
||||||
DEFAULT_PEAK_PRICE_MIN_PERIOD_LENGTH = 30 # 30 minutes minimum period length for peak price (user-facing, minutes)
|
DEFAULT_PEAK_PRICE_MIN_PERIOD_LENGTH = 30 # 30 minutes minimum period length for peak price (user-facing, minutes)
|
||||||
DEFAULT_PRICE_RATING_THRESHOLD_LOW = -10 # Default rating threshold low percentage
|
DEFAULT_PRICE_RATING_THRESHOLD_LOW = -10 # Default rating threshold low percentage
|
||||||
DEFAULT_PRICE_RATING_THRESHOLD_HIGH = 10 # Default rating threshold high percentage
|
DEFAULT_PRICE_RATING_THRESHOLD_HIGH = 10 # Default rating threshold high percentage
|
||||||
|
DEFAULT_PRICE_RATING_HYSTERESIS = 2.0 # Hysteresis percentage to prevent flickering at threshold boundaries
|
||||||
|
DEFAULT_PRICE_RATING_GAP_TOLERANCE = 1 # Max consecutive intervals to smooth out (0 = disabled)
|
||||||
|
DEFAULT_PRICE_LEVEL_GAP_TOLERANCE = 1 # Max consecutive intervals to smooth out for price level (0 = disabled)
|
||||||
DEFAULT_AVERAGE_SENSOR_DISPLAY = "median" # Default: show median in state, mean in attributes
|
DEFAULT_AVERAGE_SENSOR_DISPLAY = "median" # Default: show median in state, mean in attributes
|
||||||
DEFAULT_PRICE_TREND_THRESHOLD_RISING = 3 # Default trend threshold for rising prices (%)
|
DEFAULT_PRICE_TREND_THRESHOLD_RISING = 3 # Default trend threshold for rising prices (%)
|
||||||
DEFAULT_PRICE_TREND_THRESHOLD_FALLING = -3 # Default trend threshold for falling prices (%, negative value)
|
DEFAULT_PRICE_TREND_THRESHOLD_FALLING = -3 # Default trend threshold for falling prices (%, negative value)
|
||||||
|
# Strong trend thresholds default to 2x the base threshold.
|
||||||
|
# These are independently configurable to allow fine-tuning of "strongly" detection.
|
||||||
|
DEFAULT_PRICE_TREND_THRESHOLD_STRONGLY_RISING = 6 # Default strong rising threshold (%)
|
||||||
|
DEFAULT_PRICE_TREND_THRESHOLD_STRONGLY_FALLING = -6 # Default strong falling threshold (%, negative value)
|
||||||
# Default volatility thresholds (relative values using coefficient of variation)
|
# Default volatility thresholds (relative values using coefficient of variation)
|
||||||
# Coefficient of variation = (standard_deviation / mean) * 100%
|
# Coefficient of variation = (standard_deviation / mean) * 100%
|
||||||
# These thresholds are unitless and work across different price levels
|
# These thresholds are unitless and work across different price levels
|
||||||
|
|
@ -131,6 +143,12 @@ MIN_PRICE_RATING_THRESHOLD_LOW = -50 # Minimum value for low rating threshold
|
||||||
MAX_PRICE_RATING_THRESHOLD_LOW = -5 # Maximum value for low rating threshold (must be < HIGH)
|
MAX_PRICE_RATING_THRESHOLD_LOW = -5 # Maximum value for low rating threshold (must be < HIGH)
|
||||||
MIN_PRICE_RATING_THRESHOLD_HIGH = 5 # Minimum value for high rating threshold (must be > LOW)
|
MIN_PRICE_RATING_THRESHOLD_HIGH = 5 # Minimum value for high rating threshold (must be > LOW)
|
||||||
MAX_PRICE_RATING_THRESHOLD_HIGH = 50 # Maximum value for high rating threshold
|
MAX_PRICE_RATING_THRESHOLD_HIGH = 50 # Maximum value for high rating threshold
|
||||||
|
MIN_PRICE_RATING_HYSTERESIS = 0.0 # Minimum hysteresis (0 = disabled)
|
||||||
|
MAX_PRICE_RATING_HYSTERESIS = 5.0 # Maximum hysteresis (5% band)
|
||||||
|
MIN_PRICE_RATING_GAP_TOLERANCE = 0 # Minimum gap tolerance (0 = disabled)
|
||||||
|
MAX_PRICE_RATING_GAP_TOLERANCE = 4 # Maximum gap tolerance (4 intervals = 1 hour)
|
||||||
|
MIN_PRICE_LEVEL_GAP_TOLERANCE = 0 # Minimum gap tolerance for price level (0 = disabled)
|
||||||
|
MAX_PRICE_LEVEL_GAP_TOLERANCE = 4 # Maximum gap tolerance for price level (4 intervals = 1 hour)
|
||||||
|
|
||||||
# Volatility threshold limits
|
# Volatility threshold limits
|
||||||
# MODERATE threshold: practical range 5% to 25% (entry point for noticeable fluctuation)
|
# MODERATE threshold: practical range 5% to 25% (entry point for noticeable fluctuation)
|
||||||
|
|
@ -149,6 +167,11 @@ MIN_PRICE_TREND_RISING = 1 # Minimum rising trend threshold
|
||||||
MAX_PRICE_TREND_RISING = 50 # Maximum rising trend threshold
|
MAX_PRICE_TREND_RISING = 50 # Maximum rising trend threshold
|
||||||
MIN_PRICE_TREND_FALLING = -50 # Minimum falling trend threshold (negative)
|
MIN_PRICE_TREND_FALLING = -50 # Minimum falling trend threshold (negative)
|
||||||
MAX_PRICE_TREND_FALLING = -1 # Maximum falling trend threshold (negative)
|
MAX_PRICE_TREND_FALLING = -1 # Maximum falling trend threshold (negative)
|
||||||
|
# Strong trend thresholds have higher ranges to allow detection of significant moves
|
||||||
|
MIN_PRICE_TREND_STRONGLY_RISING = 2 # Minimum strongly rising threshold (must be > rising)
|
||||||
|
MAX_PRICE_TREND_STRONGLY_RISING = 100 # Maximum strongly rising threshold
|
||||||
|
MIN_PRICE_TREND_STRONGLY_FALLING = -100 # Minimum strongly falling threshold (negative)
|
||||||
|
MAX_PRICE_TREND_STRONGLY_FALLING = -2 # Maximum strongly falling threshold (must be < falling)
|
||||||
|
|
||||||
# Gap count and relaxation limits
|
# Gap count and relaxation limits
|
||||||
MIN_GAP_COUNT = 0 # Minimum gap count
|
MIN_GAP_COUNT = 0 # Minimum gap count
|
||||||
|
|
@ -326,9 +349,12 @@ def get_default_options(currency_code: str | None) -> dict[str, Any]:
|
||||||
CONF_VIRTUAL_TIME_OFFSET_DAYS: DEFAULT_VIRTUAL_TIME_OFFSET_DAYS,
|
CONF_VIRTUAL_TIME_OFFSET_DAYS: DEFAULT_VIRTUAL_TIME_OFFSET_DAYS,
|
||||||
CONF_VIRTUAL_TIME_OFFSET_HOURS: DEFAULT_VIRTUAL_TIME_OFFSET_HOURS,
|
CONF_VIRTUAL_TIME_OFFSET_HOURS: DEFAULT_VIRTUAL_TIME_OFFSET_HOURS,
|
||||||
CONF_VIRTUAL_TIME_OFFSET_MINUTES: DEFAULT_VIRTUAL_TIME_OFFSET_MINUTES,
|
CONF_VIRTUAL_TIME_OFFSET_MINUTES: DEFAULT_VIRTUAL_TIME_OFFSET_MINUTES,
|
||||||
# Price rating thresholds (flat - single-section step)
|
# Price rating settings (flat - single-section step)
|
||||||
CONF_PRICE_RATING_THRESHOLD_LOW: DEFAULT_PRICE_RATING_THRESHOLD_LOW,
|
CONF_PRICE_RATING_THRESHOLD_LOW: DEFAULT_PRICE_RATING_THRESHOLD_LOW,
|
||||||
CONF_PRICE_RATING_THRESHOLD_HIGH: DEFAULT_PRICE_RATING_THRESHOLD_HIGH,
|
CONF_PRICE_RATING_THRESHOLD_HIGH: DEFAULT_PRICE_RATING_THRESHOLD_HIGH,
|
||||||
|
CONF_PRICE_RATING_HYSTERESIS: DEFAULT_PRICE_RATING_HYSTERESIS,
|
||||||
|
CONF_PRICE_RATING_GAP_TOLERANCE: DEFAULT_PRICE_RATING_GAP_TOLERANCE,
|
||||||
|
CONF_PRICE_LEVEL_GAP_TOLERANCE: DEFAULT_PRICE_LEVEL_GAP_TOLERANCE,
|
||||||
# Volatility thresholds (flat - single-section step)
|
# Volatility thresholds (flat - single-section step)
|
||||||
CONF_VOLATILITY_THRESHOLD_MODERATE: DEFAULT_VOLATILITY_THRESHOLD_MODERATE,
|
CONF_VOLATILITY_THRESHOLD_MODERATE: DEFAULT_VOLATILITY_THRESHOLD_MODERATE,
|
||||||
CONF_VOLATILITY_THRESHOLD_HIGH: DEFAULT_VOLATILITY_THRESHOLD_HIGH,
|
CONF_VOLATILITY_THRESHOLD_HIGH: DEFAULT_VOLATILITY_THRESHOLD_HIGH,
|
||||||
|
|
@ -432,6 +458,14 @@ VOLATILITY_MODERATE = "MODERATE"
|
||||||
VOLATILITY_HIGH = "HIGH"
|
VOLATILITY_HIGH = "HIGH"
|
||||||
VOLATILITY_VERY_HIGH = "VERY_HIGH"
|
VOLATILITY_VERY_HIGH = "VERY_HIGH"
|
||||||
|
|
||||||
|
# Price trend constants (calculated values with 5-level scale)
|
||||||
|
# Used by trend sensors: momentary, short-term, mid-term, long-term
|
||||||
|
PRICE_TREND_STRONGLY_FALLING = "strongly_falling"
|
||||||
|
PRICE_TREND_FALLING = "falling"
|
||||||
|
PRICE_TREND_STABLE = "stable"
|
||||||
|
PRICE_TREND_RISING = "rising"
|
||||||
|
PRICE_TREND_STRONGLY_RISING = "strongly_rising"
|
||||||
|
|
||||||
# Sensor options (lowercase versions for ENUM device class)
|
# Sensor options (lowercase versions for ENUM device class)
|
||||||
# NOTE: These constants define the valid enum options, but they are not used directly
|
# NOTE: These constants define the valid enum options, but they are not used directly
|
||||||
# in sensor/definitions.py due to import timing issues. Instead, the options are defined inline
|
# in sensor/definitions.py due to import timing issues. Instead, the options are defined inline
|
||||||
|
|
@ -457,6 +491,15 @@ VOLATILITY_OPTIONS = [
|
||||||
VOLATILITY_VERY_HIGH.lower(),
|
VOLATILITY_VERY_HIGH.lower(),
|
||||||
]
|
]
|
||||||
|
|
||||||
|
# Trend options for enum sensors (lowercase versions for ENUM device class)
|
||||||
|
PRICE_TREND_OPTIONS = [
|
||||||
|
PRICE_TREND_STRONGLY_FALLING,
|
||||||
|
PRICE_TREND_FALLING,
|
||||||
|
PRICE_TREND_STABLE,
|
||||||
|
PRICE_TREND_RISING,
|
||||||
|
PRICE_TREND_STRONGLY_RISING,
|
||||||
|
]
|
||||||
|
|
||||||
# Valid options for best price maximum level filter
|
# Valid options for best price maximum level filter
|
||||||
# Sorted from cheap to expensive: user selects "up to how expensive"
|
# Sorted from cheap to expensive: user selects "up to how expensive"
|
||||||
BEST_PRICE_MAX_LEVEL_OPTIONS = [
|
BEST_PRICE_MAX_LEVEL_OPTIONS = [
|
||||||
|
|
@ -499,6 +542,16 @@ PRICE_RATING_MAPPING = {
|
||||||
PRICE_RATING_HIGH: 1,
|
PRICE_RATING_HIGH: 1,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Mapping for comparing price trends (used for sorting and automation comparisons)
|
||||||
|
# Values range from -2 (strongly falling) to +2 (strongly rising), with 0 = stable
|
||||||
|
PRICE_TREND_MAPPING = {
|
||||||
|
PRICE_TREND_STRONGLY_FALLING: -2,
|
||||||
|
PRICE_TREND_FALLING: -1,
|
||||||
|
PRICE_TREND_STABLE: 0,
|
||||||
|
PRICE_TREND_RISING: 1,
|
||||||
|
PRICE_TREND_STRONGLY_RISING: 2,
|
||||||
|
}
|
||||||
|
|
||||||
# Icon mapping for price levels (dynamic icons based on level)
|
# Icon mapping for price levels (dynamic icons based on level)
|
||||||
PRICE_LEVEL_ICON_MAPPING = {
|
PRICE_LEVEL_ICON_MAPPING = {
|
||||||
PRICE_LEVEL_VERY_CHEAP: "mdi:gauge-empty",
|
PRICE_LEVEL_VERY_CHEAP: "mdi:gauge-empty",
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,28 @@
|
||||||
"""Cache management for coordinator module."""
|
"""
|
||||||
|
Cache management for coordinator persistent storage.
|
||||||
|
|
||||||
|
This module handles persistent storage for the coordinator, storing:
|
||||||
|
- user_data: Account/home metadata (required, refreshed daily)
|
||||||
|
- Timestamps for cache validation and lifecycle tracking
|
||||||
|
|
||||||
|
**Storage Architecture (as of v0.25.0):**
|
||||||
|
|
||||||
|
There are TWO persistent storage files per config entry:
|
||||||
|
|
||||||
|
1. `tibber_prices.{entry_id}` (this module)
|
||||||
|
- user_data: Account info, home metadata, timezone, currency
|
||||||
|
- Timestamps: last_user_update, last_midnight_check
|
||||||
|
|
||||||
|
2. `tibber_prices.interval_pool.{entry_id}` (interval_pool/storage.py)
|
||||||
|
- Intervals: Deduplicated quarter-hourly price data (source of truth)
|
||||||
|
- Fetch metadata: When each interval was fetched
|
||||||
|
- Protected range: Which intervals to keep during cleanup
|
||||||
|
|
||||||
|
**Single Source of Truth:**
|
||||||
|
Price intervals are ONLY stored in IntervalPool. This cache stores only
|
||||||
|
user metadata and timestamps. The IntervalPool handles all price data
|
||||||
|
fetching, caching, and persistence independently.
|
||||||
|
"""
|
||||||
|
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
|
|
@ -16,11 +40,9 @@ _LOGGER = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class TibberPricesCacheData(NamedTuple):
|
class TibberPricesCacheData(NamedTuple):
|
||||||
"""Cache data structure."""
|
"""Cache data structure for user metadata (price data is in IntervalPool)."""
|
||||||
|
|
||||||
price_data: dict[str, Any] | None
|
|
||||||
user_data: dict[str, Any] | None
|
user_data: dict[str, Any] | None
|
||||||
last_price_update: datetime | None
|
|
||||||
last_user_update: datetime | None
|
last_user_update: datetime | None
|
||||||
last_midnight_check: datetime | None
|
last_midnight_check: datetime | None
|
||||||
|
|
||||||
|
|
@ -31,20 +53,16 @@ async def load_cache(
|
||||||
*,
|
*,
|
||||||
time: TibberPricesTimeService,
|
time: TibberPricesTimeService,
|
||||||
) -> TibberPricesCacheData:
|
) -> TibberPricesCacheData:
|
||||||
"""Load cached data from storage."""
|
"""Load cached user data from storage (price data is in IntervalPool)."""
|
||||||
try:
|
try:
|
||||||
stored = await store.async_load()
|
stored = await store.async_load()
|
||||||
if stored:
|
if stored:
|
||||||
cached_price_data = stored.get("price_data")
|
|
||||||
cached_user_data = stored.get("user_data")
|
cached_user_data = stored.get("user_data")
|
||||||
|
|
||||||
# Restore timestamps
|
# Restore timestamps
|
||||||
last_price_update = None
|
|
||||||
last_user_update = None
|
last_user_update = None
|
||||||
last_midnight_check = None
|
last_midnight_check = None
|
||||||
|
|
||||||
if last_price_update_str := stored.get("last_price_update"):
|
|
||||||
last_price_update = time.parse_datetime(last_price_update_str)
|
|
||||||
if last_user_update_str := stored.get("last_user_update"):
|
if last_user_update_str := stored.get("last_user_update"):
|
||||||
last_user_update = time.parse_datetime(last_user_update_str)
|
last_user_update = time.parse_datetime(last_user_update_str)
|
||||||
if last_midnight_check_str := stored.get("last_midnight_check"):
|
if last_midnight_check_str := stored.get("last_midnight_check"):
|
||||||
|
|
@ -52,9 +70,7 @@ async def load_cache(
|
||||||
|
|
||||||
_LOGGER.debug("%s Cache loaded successfully", log_prefix)
|
_LOGGER.debug("%s Cache loaded successfully", log_prefix)
|
||||||
return TibberPricesCacheData(
|
return TibberPricesCacheData(
|
||||||
price_data=cached_price_data,
|
|
||||||
user_data=cached_user_data,
|
user_data=cached_user_data,
|
||||||
last_price_update=last_price_update,
|
|
||||||
last_user_update=last_user_update,
|
last_user_update=last_user_update,
|
||||||
last_midnight_check=last_midnight_check,
|
last_midnight_check=last_midnight_check,
|
||||||
)
|
)
|
||||||
|
|
@ -64,9 +80,7 @@ async def load_cache(
|
||||||
_LOGGER.warning("%s Failed to load cache: %s", log_prefix, ex)
|
_LOGGER.warning("%s Failed to load cache: %s", log_prefix, ex)
|
||||||
|
|
||||||
return TibberPricesCacheData(
|
return TibberPricesCacheData(
|
||||||
price_data=None,
|
|
||||||
user_data=None,
|
user_data=None,
|
||||||
last_price_update=None,
|
|
||||||
last_user_update=None,
|
last_user_update=None,
|
||||||
last_midnight_check=None,
|
last_midnight_check=None,
|
||||||
)
|
)
|
||||||
|
|
@ -77,11 +91,9 @@ async def save_cache(
|
||||||
cache_data: TibberPricesCacheData,
|
cache_data: TibberPricesCacheData,
|
||||||
log_prefix: str,
|
log_prefix: str,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Store cache data."""
|
"""Store cache data (user metadata only, price data is in IntervalPool)."""
|
||||||
data = {
|
data = {
|
||||||
"price_data": cache_data.price_data,
|
|
||||||
"user_data": cache_data.user_data,
|
"user_data": cache_data.user_data,
|
||||||
"last_price_update": (cache_data.last_price_update.isoformat() if cache_data.last_price_update else None),
|
|
||||||
"last_user_update": (cache_data.last_user_update.isoformat() if cache_data.last_user_update else None),
|
"last_user_update": (cache_data.last_user_update.isoformat() if cache_data.last_user_update else None),
|
||||||
"last_midnight_check": (cache_data.last_midnight_check.isoformat() if cache_data.last_midnight_check else None),
|
"last_midnight_check": (cache_data.last_midnight_check.isoformat() if cache_data.last_midnight_check else None),
|
||||||
}
|
}
|
||||||
|
|
@ -91,55 +103,3 @@ async def save_cache(
|
||||||
_LOGGER.debug("%s Cache stored successfully", log_prefix)
|
_LOGGER.debug("%s Cache stored successfully", log_prefix)
|
||||||
except OSError:
|
except OSError:
|
||||||
_LOGGER.exception("%s Failed to store cache", log_prefix)
|
_LOGGER.exception("%s Failed to store cache", log_prefix)
|
||||||
|
|
||||||
|
|
||||||
def is_cache_valid(
|
|
||||||
cache_data: TibberPricesCacheData,
|
|
||||||
log_prefix: str,
|
|
||||||
*,
|
|
||||||
time: TibberPricesTimeService,
|
|
||||||
) -> bool:
|
|
||||||
"""
|
|
||||||
Validate if cached price data is still current.
|
|
||||||
|
|
||||||
Returns False if:
|
|
||||||
- No cached data exists
|
|
||||||
- Cached data is from a different calendar day (in local timezone)
|
|
||||||
- Midnight turnover has occurred since cache was saved
|
|
||||||
- Cache structure is outdated (pre-v0.15.0 multi-home format)
|
|
||||||
|
|
||||||
"""
|
|
||||||
if cache_data.price_data is None or cache_data.last_price_update is None:
|
|
||||||
return False
|
|
||||||
|
|
||||||
# Check for old cache structure (multi-home format from v0.14.0)
|
|
||||||
# Old format: {"homes": {home_id: {...}}}
|
|
||||||
# New format: {"home_id": str, "price_info": [...]}
|
|
||||||
if "homes" in cache_data.price_data:
|
|
||||||
_LOGGER.info(
|
|
||||||
"%s Cache has old multi-home structure (v0.14.0), invalidating to fetch fresh data",
|
|
||||||
log_prefix,
|
|
||||||
)
|
|
||||||
return False
|
|
||||||
|
|
||||||
# Check for missing required keys in new structure
|
|
||||||
if "price_info" not in cache_data.price_data:
|
|
||||||
_LOGGER.info(
|
|
||||||
"%s Cache missing 'price_info' key, invalidating to fetch fresh data",
|
|
||||||
log_prefix,
|
|
||||||
)
|
|
||||||
return False
|
|
||||||
|
|
||||||
current_local_date = time.as_local(time.now()).date()
|
|
||||||
last_update_local_date = time.as_local(cache_data.last_price_update).date()
|
|
||||||
|
|
||||||
if current_local_date != last_update_local_date:
|
|
||||||
_LOGGER.debug(
|
|
||||||
"%s Cache date mismatch: cached=%s, current=%s",
|
|
||||||
log_prefix,
|
|
||||||
last_update_local_date,
|
|
||||||
current_local_date,
|
|
||||||
)
|
|
||||||
return False
|
|
||||||
|
|
||||||
return True
|
|
||||||
|
|
|
||||||
|
|
@ -31,6 +31,7 @@ TIME_SENSITIVE_ENTITY_KEYS = frozenset(
|
||||||
{
|
{
|
||||||
# Current/next/previous price sensors
|
# Current/next/previous price sensors
|
||||||
"current_interval_price",
|
"current_interval_price",
|
||||||
|
"current_interval_price_base",
|
||||||
"next_interval_price",
|
"next_interval_price",
|
||||||
"previous_interval_price",
|
"previous_interval_price",
|
||||||
# Current/next/previous price levels
|
# Current/next/previous price levels
|
||||||
|
|
@ -84,7 +85,11 @@ TIME_SENSITIVE_ENTITY_KEYS = frozenset(
|
||||||
"best_price_next_start_time",
|
"best_price_next_start_time",
|
||||||
"peak_price_end_time",
|
"peak_price_end_time",
|
||||||
"peak_price_next_start_time",
|
"peak_price_next_start_time",
|
||||||
# Lifecycle sensor (needs quarter-hour updates for turnover_pending detection at 23:45)
|
# Lifecycle sensor needs quarter-hour precision for state transitions:
|
||||||
|
# - 23:45: turnover_pending (last interval before midnight)
|
||||||
|
# - 00:00: turnover complete (after midnight API update)
|
||||||
|
# - 13:00: searching_tomorrow (when tomorrow data search begins)
|
||||||
|
# Uses state-change filter in _handle_time_sensitive_update() to prevent recorder spam
|
||||||
"data_lifecycle_status",
|
"data_lifecycle_status",
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
|
||||||
|
|
@ -11,7 +11,6 @@ from homeassistant.helpers.storage import Store
|
||||||
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
|
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from collections.abc import Callable
|
|
||||||
from datetime import date, datetime
|
from datetime import date, datetime
|
||||||
|
|
||||||
from homeassistant.config_entries import ConfigEntry
|
from homeassistant.config_entries import ConfigEntry
|
||||||
|
|
@ -35,11 +34,11 @@ from .constants import (
|
||||||
STORAGE_VERSION,
|
STORAGE_VERSION,
|
||||||
UPDATE_INTERVAL,
|
UPDATE_INTERVAL,
|
||||||
)
|
)
|
||||||
from .data_fetching import TibberPricesDataFetcher
|
|
||||||
from .data_transformation import TibberPricesDataTransformer
|
from .data_transformation import TibberPricesDataTransformer
|
||||||
from .listeners import TibberPricesListenerManager
|
from .listeners import TibberPricesListenerManager
|
||||||
from .midnight_handler import TibberPricesMidnightHandler
|
from .midnight_handler import TibberPricesMidnightHandler
|
||||||
from .periods import TibberPricesPeriodCalculator
|
from .periods import TibberPricesPeriodCalculator
|
||||||
|
from .price_data_manager import TibberPricesPriceDataManager
|
||||||
from .repairs import TibberPricesRepairManager
|
from .repairs import TibberPricesRepairManager
|
||||||
from .time_service import TibberPricesTimeService
|
from .time_service import TibberPricesTimeService
|
||||||
|
|
||||||
|
|
@ -206,18 +205,20 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
||||||
# Initialize helper modules
|
# Initialize helper modules
|
||||||
self._listener_manager = TibberPricesListenerManager(hass, self._log_prefix)
|
self._listener_manager = TibberPricesListenerManager(hass, self._log_prefix)
|
||||||
self._midnight_handler = TibberPricesMidnightHandler()
|
self._midnight_handler = TibberPricesMidnightHandler()
|
||||||
self._data_fetcher = TibberPricesDataFetcher(
|
self._price_data_manager = TibberPricesPriceDataManager(
|
||||||
api=self.api,
|
api=self.api,
|
||||||
store=self._store,
|
store=self._store,
|
||||||
log_prefix=self._log_prefix,
|
log_prefix=self._log_prefix,
|
||||||
user_update_interval=timedelta(days=1),
|
user_update_interval=timedelta(days=1),
|
||||||
time=self.time,
|
time=self.time,
|
||||||
home_id=self._home_id,
|
home_id=self._home_id,
|
||||||
|
interval_pool=self.interval_pool,
|
||||||
)
|
)
|
||||||
# Create period calculator BEFORE data transformer (transformer needs it in lambda)
|
# Create period calculator BEFORE data transformer (transformer needs it in lambda)
|
||||||
self._period_calculator = TibberPricesPeriodCalculator(
|
self._period_calculator = TibberPricesPeriodCalculator(
|
||||||
config_entry=config_entry,
|
config_entry=config_entry,
|
||||||
log_prefix=self._log_prefix,
|
log_prefix=self._log_prefix,
|
||||||
|
get_config_override_fn=self.get_config_override,
|
||||||
)
|
)
|
||||||
self._data_transformer = TibberPricesDataTransformer(
|
self._data_transformer = TibberPricesDataTransformer(
|
||||||
config_entry=config_entry,
|
config_entry=config_entry,
|
||||||
|
|
@ -236,22 +237,29 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
||||||
# Register options update listener to invalidate config caches
|
# Register options update listener to invalidate config caches
|
||||||
config_entry.async_on_unload(config_entry.add_update_listener(self._handle_options_update))
|
config_entry.async_on_unload(config_entry.add_update_listener(self._handle_options_update))
|
||||||
|
|
||||||
# Legacy compatibility - keep references for methods that access directly
|
# User data cache (price data is in IntervalPool)
|
||||||
self._cached_user_data: dict[str, Any] | None = None
|
self._cached_user_data: dict[str, Any] | None = None
|
||||||
self._last_user_update: datetime | None = None
|
self._last_user_update: datetime | None = None
|
||||||
self._user_update_interval = timedelta(days=1)
|
self._user_update_interval = timedelta(days=1)
|
||||||
self._cached_price_data: dict[str, Any] | None = None
|
|
||||||
self._last_price_update: datetime | None = None
|
|
||||||
|
|
||||||
# Data lifecycle tracking for diagnostic sensor
|
# Data lifecycle tracking
|
||||||
|
# Note: _lifecycle_state is used for DIAGNOSTICS only (diagnostics.py export).
|
||||||
|
# The lifecycle SENSOR calculates its state dynamically in get_lifecycle_state(),
|
||||||
|
# using: _is_fetching, last_exception, time calculations, _needs_tomorrow_data(),
|
||||||
|
# and _last_price_update. It does NOT read _lifecycle_state!
|
||||||
self._lifecycle_state: str = (
|
self._lifecycle_state: str = (
|
||||||
"cached" # Current state: cached, fresh, refreshing, searching_tomorrow, turnover_pending, error
|
"cached" # For diagnostics: cached, fresh, refreshing, searching_tomorrow, turnover_pending, error
|
||||||
)
|
)
|
||||||
|
self._last_price_update: datetime | None = None # When price data was last fetched from API
|
||||||
self._api_calls_today: int = 0 # Counter for API calls today
|
self._api_calls_today: int = 0 # Counter for API calls today
|
||||||
self._last_api_call_date: date | None = None # Date of last API call (for daily reset)
|
self._last_api_call_date: date | None = None # Date of last API call (for daily reset)
|
||||||
self._is_fetching: bool = False # Flag to track active API fetch
|
self._is_fetching: bool = False # Flag to track active API fetch (read by lifecycle sensor)
|
||||||
self._last_coordinator_update: datetime | None = None # When Timer #1 last ran (_async_update_data)
|
self._last_coordinator_update: datetime | None = None # When Timer #1 last ran (_async_update_data)
|
||||||
self._lifecycle_callbacks: list[Callable[[], None]] = [] # Push-update callbacks for lifecycle sensor
|
|
||||||
|
# Runtime config overrides from config entities (number/switch)
|
||||||
|
# Structure: {"section_name": {"config_key": value, ...}, ...}
|
||||||
|
# When set, these override the corresponding options from config_entry.options
|
||||||
|
self._config_overrides: dict[str, dict[str, Any]] = {}
|
||||||
|
|
||||||
# Start timers
|
# Start timers
|
||||||
self._listener_manager.schedule_quarter_hour_refresh(self._handle_quarter_hour_refresh)
|
self._listener_manager.schedule_quarter_hour_refresh(self._handle_quarter_hour_refresh)
|
||||||
|
|
@ -264,20 +272,128 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
||||||
|
|
||||||
async def _handle_options_update(self, _hass: HomeAssistant, _config_entry: ConfigEntry) -> None:
|
async def _handle_options_update(self, _hass: HomeAssistant, _config_entry: ConfigEntry) -> None:
|
||||||
"""Handle options update by invalidating config caches and re-transforming data."""
|
"""Handle options update by invalidating config caches and re-transforming data."""
|
||||||
self._log("debug", "Options updated, invalidating config caches")
|
self._log("debug", "Options update triggered, re-transforming data")
|
||||||
self._data_transformer.invalidate_config_cache()
|
self._data_transformer.invalidate_config_cache()
|
||||||
self._period_calculator.invalidate_config_cache()
|
self._period_calculator.invalidate_config_cache()
|
||||||
|
|
||||||
# Re-transform existing cached data with new configuration
|
# Re-transform existing data with new configuration
|
||||||
# This updates rating_levels, volatility, and period calculations
|
# This updates rating_levels, volatility, and period calculations
|
||||||
# without needing to fetch new data from the API
|
# without needing to fetch new data from the API
|
||||||
if self._cached_price_data:
|
if self.data and "priceInfo" in self.data:
|
||||||
self._log("debug", "Re-transforming cached data with new configuration")
|
# Extract raw price_info and re-transform
|
||||||
self.data = self._transform_data(self._cached_price_data)
|
raw_data = {"price_info": self.data["priceInfo"]}
|
||||||
# Notify all listeners about the updated data
|
self.data = self._transform_data(raw_data)
|
||||||
self.async_update_listeners()
|
self.async_update_listeners()
|
||||||
else:
|
else:
|
||||||
self._log("warning", "No cached data available to re-transform")
|
self._log("debug", "No data to re-transform")
|
||||||
|
|
||||||
|
# =========================================================================
|
||||||
|
# Runtime Config Override Methods (for number/switch entities)
|
||||||
|
# =========================================================================
|
||||||
|
|
||||||
|
def set_config_override(self, config_key: str, config_section: str, value: Any) -> None:
|
||||||
|
"""
|
||||||
|
Set a runtime config override value.
|
||||||
|
|
||||||
|
These overrides take precedence over options from config_entry.options
|
||||||
|
and are used by number/switch entities for runtime configuration.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
config_key: The configuration key (e.g., CONF_BEST_PRICE_FLEX)
|
||||||
|
config_section: The section in options (e.g., "flexibility_settings")
|
||||||
|
value: The override value
|
||||||
|
|
||||||
|
"""
|
||||||
|
if config_section not in self._config_overrides:
|
||||||
|
self._config_overrides[config_section] = {}
|
||||||
|
self._config_overrides[config_section][config_key] = value
|
||||||
|
self._log(
|
||||||
|
"debug",
|
||||||
|
"Config override set: %s.%s = %s",
|
||||||
|
config_section,
|
||||||
|
config_key,
|
||||||
|
value,
|
||||||
|
)
|
||||||
|
|
||||||
|
def remove_config_override(self, config_key: str, config_section: str) -> None:
|
||||||
|
"""
|
||||||
|
Remove a runtime config override value.
|
||||||
|
|
||||||
|
After removal, the value from config_entry.options will be used again.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
config_key: The configuration key to remove
|
||||||
|
config_section: The section the key belongs to
|
||||||
|
|
||||||
|
"""
|
||||||
|
if config_section in self._config_overrides:
|
||||||
|
self._config_overrides[config_section].pop(config_key, None)
|
||||||
|
# Clean up empty sections
|
||||||
|
if not self._config_overrides[config_section]:
|
||||||
|
del self._config_overrides[config_section]
|
||||||
|
self._log(
|
||||||
|
"debug",
|
||||||
|
"Config override removed: %s.%s",
|
||||||
|
config_section,
|
||||||
|
config_key,
|
||||||
|
)
|
||||||
|
|
||||||
|
def get_config_override(self, config_key: str, config_section: str) -> Any | None:
|
||||||
|
"""
|
||||||
|
Get a runtime config override value if set.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
config_key: The configuration key to check
|
||||||
|
config_section: The section the key belongs to
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
The override value if set, None otherwise
|
||||||
|
|
||||||
|
"""
|
||||||
|
return self._config_overrides.get(config_section, {}).get(config_key)
|
||||||
|
|
||||||
|
def has_config_override(self, config_key: str, config_section: str) -> bool:
|
||||||
|
"""
|
||||||
|
Check if a runtime config override is set.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
config_key: The configuration key to check
|
||||||
|
config_section: The section the key belongs to
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if an override is set, False otherwise
|
||||||
|
|
||||||
|
"""
|
||||||
|
return config_key in self._config_overrides.get(config_section, {})
|
||||||
|
|
||||||
|
def get_active_overrides(self) -> dict[str, dict[str, Any]]:
|
||||||
|
"""
|
||||||
|
Get all active config overrides.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dictionary of all active overrides by section
|
||||||
|
|
||||||
|
"""
|
||||||
|
return self._config_overrides.copy()
|
||||||
|
|
||||||
|
async def async_handle_config_override_update(self) -> None:
|
||||||
|
"""
|
||||||
|
Handle config override change by invalidating caches and re-transforming data.
|
||||||
|
|
||||||
|
This is called by number/switch entities when their values change.
|
||||||
|
Uses the same logic as options update to ensure consistent behavior.
|
||||||
|
"""
|
||||||
|
self._log("debug", "Config override update triggered, re-transforming data")
|
||||||
|
self._data_transformer.invalidate_config_cache()
|
||||||
|
self._period_calculator.invalidate_config_cache()
|
||||||
|
|
||||||
|
# Re-transform existing data with new configuration
|
||||||
|
if self.data and "priceInfo" in self.data:
|
||||||
|
raw_data = {"price_info": self.data["priceInfo"]}
|
||||||
|
self.data = self._transform_data(raw_data)
|
||||||
|
self.async_update_listeners()
|
||||||
|
else:
|
||||||
|
self._log("debug", "No data to re-transform")
|
||||||
|
|
||||||
@callback
|
@callback
|
||||||
def async_add_time_sensitive_listener(self, update_callback: TimeServiceCallback) -> CALLBACK_TYPE:
|
def async_add_time_sensitive_listener(self, update_callback: TimeServiceCallback) -> CALLBACK_TYPE:
|
||||||
|
|
@ -357,7 +473,7 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
||||||
|
|
||||||
# Update helper modules with fresh TimeService instance
|
# Update helper modules with fresh TimeService instance
|
||||||
self.api.time = time_service
|
self.api.time = time_service
|
||||||
self._data_fetcher.time = time_service
|
self._price_data_manager.time = time_service
|
||||||
self._data_transformer.time = time_service
|
self._data_transformer.time = time_service
|
||||||
self._period_calculator.time = time_service
|
self._period_calculator.time = time_service
|
||||||
|
|
||||||
|
|
@ -457,18 +573,13 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
||||||
current_date,
|
current_date,
|
||||||
)
|
)
|
||||||
|
|
||||||
# With flat interval list architecture, no rotation needed!
|
# With flat interval list architecture and IntervalPool as source of truth,
|
||||||
# get_intervals_for_day_offsets() automatically filters by date.
|
# no data rotation needed! get_intervals_for_day_offsets() automatically
|
||||||
# Just update coordinator's data to trigger entity updates.
|
# filters by date. Just re-transform to refresh enrichment.
|
||||||
if self.data and self._cached_price_data:
|
if self.data and "priceInfo" in self.data:
|
||||||
# Re-transform data to ensure enrichment is refreshed
|
# Re-transform data to ensure enrichment is refreshed for new day
|
||||||
self.data = self._transform_data(self._cached_price_data)
|
raw_data = {"price_info": self.data["priceInfo"]}
|
||||||
|
self.data = self._transform_data(raw_data)
|
||||||
# CRITICAL: Update _last_price_update to current time after midnight
|
|
||||||
# This prevents cache_validity from showing "date_mismatch" after midnight
|
|
||||||
# The data is still valid (just rotated today→yesterday, tomorrow→today)
|
|
||||||
# Update timestamp to reflect that the data is current for the new day
|
|
||||||
self._last_price_update = now
|
|
||||||
|
|
||||||
# Mark turnover as done for today (atomic update)
|
# Mark turnover as done for today (atomic update)
|
||||||
self._midnight_handler.mark_turnover_done(now)
|
self._midnight_handler.mark_turnover_done(now)
|
||||||
|
|
@ -555,19 +666,21 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
||||||
|
|
||||||
# Transition lifecycle state from "fresh" to "cached" if enough time passed
|
# Transition lifecycle state from "fresh" to "cached" if enough time passed
|
||||||
# (5 minutes threshold defined in lifecycle calculator)
|
# (5 minutes threshold defined in lifecycle calculator)
|
||||||
if self._lifecycle_state == "fresh" and self._last_price_update:
|
# Note: This updates _lifecycle_state for diagnostics only.
|
||||||
age = current_time - self._last_price_update
|
# The lifecycle sensor calculates its state dynamically in get_lifecycle_state(),
|
||||||
if age.total_seconds() > FRESH_TO_CACHED_SECONDS:
|
# checking _last_price_update timestamp directly.
|
||||||
self._lifecycle_state = "cached"
|
if self._lifecycle_state == "fresh":
|
||||||
|
# After 5 minutes, data is considered "cached" (no longer "just fetched")
|
||||||
|
self._lifecycle_state = "cached"
|
||||||
|
|
||||||
# Update helper modules with fresh TimeService instance
|
# Update helper modules with fresh TimeService instance
|
||||||
self.api.time = self.time
|
self.api.time = self.time
|
||||||
self._data_fetcher.time = self.time
|
self._price_data_manager.time = self.time
|
||||||
self._data_transformer.time = self.time
|
self._data_transformer.time = self.time
|
||||||
self._period_calculator.time = self.time
|
self._period_calculator.time = self.time
|
||||||
|
|
||||||
# Load cache if not already loaded
|
# Load cache if not already loaded (user data only, price data is in Pool)
|
||||||
if self._cached_price_data is None and self._cached_user_data is None:
|
if self._cached_user_data is None:
|
||||||
await self.load_cache()
|
await self.load_cache()
|
||||||
|
|
||||||
# Initialize midnight handler on first run
|
# Initialize midnight handler on first run
|
||||||
|
|
@ -604,47 +717,44 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
||||||
self._api_calls_today = 0
|
self._api_calls_today = 0
|
||||||
self._last_api_call_date = current_date
|
self._last_api_call_date = current_date
|
||||||
|
|
||||||
# Track last_price_update timestamp before fetch to detect if data actually changed
|
# Set _is_fetching flag - lifecycle sensor shows "refreshing" during fetch
|
||||||
old_price_update = self._last_price_update
|
# Note: Lifecycle sensor reads this flag directly in get_lifecycle_state()
|
||||||
|
self._is_fetching = True
|
||||||
|
|
||||||
# CRITICAL: Check if we need to fetch data BEFORE starting the fetch
|
# Get current price info to check if tomorrow data already exists
|
||||||
# This allows the lifecycle sensor to show "searching_tomorrow" status
|
current_price_info = self.data.get("priceInfo", []) if self.data else []
|
||||||
# when we're actively looking for tomorrow's data after 13:00
|
|
||||||
should_update = self._data_fetcher.should_update_price_data(current_time)
|
|
||||||
|
|
||||||
# Set _is_fetching flag if we're about to fetch data
|
result, api_called = await self._price_data_manager.handle_main_entry_update(
|
||||||
# This makes the lifecycle sensor show "refreshing" status during the API call
|
|
||||||
if should_update:
|
|
||||||
self._is_fetching = True
|
|
||||||
# Immediately notify lifecycle sensor about state change
|
|
||||||
# This ensures "refreshing" or "searching_tomorrow" appears DURING the fetch
|
|
||||||
self.async_update_listeners()
|
|
||||||
|
|
||||||
result = await self._data_fetcher.handle_main_entry_update(
|
|
||||||
current_time,
|
current_time,
|
||||||
self._home_id,
|
self._home_id,
|
||||||
self._transform_data,
|
self._transform_data,
|
||||||
|
current_price_info=current_price_info,
|
||||||
)
|
)
|
||||||
|
|
||||||
# CRITICAL: Reset fetching flag AFTER data fetch completes
|
# CRITICAL: Reset fetching flag AFTER data fetch completes
|
||||||
self._is_fetching = False
|
self._is_fetching = False
|
||||||
|
|
||||||
# CRITICAL: Sync cached data after API call
|
# Sync user_data cache (price data is in IntervalPool)
|
||||||
# handle_main_entry_update() updates data_fetcher's cache, we need to sync:
|
self._cached_user_data = self._price_data_manager.cached_user_data
|
||||||
# 1. cached_user_data (for new integrations, may be fetched via update_user_data_if_needed())
|
|
||||||
# 2. cached_price_data (CRITICAL: contains tomorrow data, needed for _needs_tomorrow_data())
|
|
||||||
# 3. _last_price_update (for lifecycle tracking: cache age, fresh state detection)
|
|
||||||
self._cached_user_data = self._data_fetcher.cached_user_data
|
|
||||||
self._cached_price_data = self._data_fetcher.cached_price_data
|
|
||||||
self._last_price_update = self._data_fetcher._last_price_update # noqa: SLF001 - Sync for lifecycle tracking
|
|
||||||
|
|
||||||
# Update lifecycle tracking only if we fetched NEW data (timestamp changed)
|
# Update lifecycle tracking - ONLY if API was actually called
|
||||||
# This prevents recorder spam from state changes when returning cached data
|
# (not when returning cached data)
|
||||||
if self._last_price_update != old_price_update:
|
if api_called and result and "priceInfo" in result and len(result["priceInfo"]) > 0:
|
||||||
|
self._last_price_update = current_time # Track when data was fetched from API
|
||||||
self._api_calls_today += 1
|
self._api_calls_today += 1
|
||||||
self._lifecycle_state = "fresh" # Data just fetched
|
self._lifecycle_state = "fresh" # Data just fetched
|
||||||
# No separate lifecycle notification needed - normal async_update_listeners()
|
_LOGGER.debug(
|
||||||
# will trigger all entities (including lifecycle sensor) after this return
|
"API call completed: Fetched %d intervals, updating lifecycle to 'fresh'",
|
||||||
|
len(result["priceInfo"]),
|
||||||
|
)
|
||||||
|
# Note: _lifecycle_state is for diagnostics only.
|
||||||
|
# Lifecycle sensor calculates state dynamically from _last_price_update.
|
||||||
|
elif not api_called:
|
||||||
|
# Using cached data - lifecycle stays as is (cached/searching_tomorrow/etc.)
|
||||||
|
_LOGGER.debug(
|
||||||
|
"Using cached data: %d intervals from pool, no API call made",
|
||||||
|
len(result.get("priceInfo", [])),
|
||||||
|
)
|
||||||
except (
|
except (
|
||||||
TibberPricesApiClientAuthenticationError,
|
TibberPricesApiClientAuthenticationError,
|
||||||
TibberPricesApiClientCommunicationError,
|
TibberPricesApiClientCommunicationError,
|
||||||
|
|
@ -652,17 +762,18 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
||||||
) as err:
|
) as err:
|
||||||
# Reset lifecycle state on error
|
# Reset lifecycle state on error
|
||||||
self._is_fetching = False
|
self._is_fetching = False
|
||||||
self._lifecycle_state = "error"
|
self._lifecycle_state = "error" # For diagnostics
|
||||||
|
# Note: Lifecycle sensor detects errors via coordinator.last_exception
|
||||||
|
|
||||||
# Track rate limit errors for repair system
|
# Track rate limit errors for repair system
|
||||||
await self._track_rate_limit_error(err)
|
await self._track_rate_limit_error(err)
|
||||||
|
|
||||||
# No separate lifecycle notification needed - error case returns data
|
# Handle API error - will re-raise as ConfigEntryAuthFailed or UpdateFailed
|
||||||
# which triggers normal async_update_listeners()
|
# Note: With IntervalPool, there's no local cache fallback here.
|
||||||
return await self._data_fetcher.handle_api_error(
|
# The Pool has its own persistence for offline recovery.
|
||||||
err,
|
await self._price_data_manager.handle_api_error(err)
|
||||||
self._transform_data,
|
# Note: handle_api_error always raises, this is never reached
|
||||||
)
|
return {} # Satisfy type checker
|
||||||
else:
|
else:
|
||||||
# Check for repair conditions after successful update
|
# Check for repair conditions after successful update
|
||||||
await self._check_repair_conditions(result, current_time)
|
await self._check_repair_conditions(result, current_time)
|
||||||
|
|
@ -692,7 +803,7 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
||||||
|
|
||||||
# 2. Tomorrow data availability (after 18:00)
|
# 2. Tomorrow data availability (after 18:00)
|
||||||
if result and "priceInfo" in result:
|
if result and "priceInfo" in result:
|
||||||
has_tomorrow_data = self._data_fetcher.has_tomorrow_data(result["priceInfo"])
|
has_tomorrow_data = self._price_data_manager.has_tomorrow_data(result["priceInfo"])
|
||||||
await self._repair_manager.check_tomorrow_data_availability(
|
await self._repair_manager.check_tomorrow_data_availability(
|
||||||
has_tomorrow_data=has_tomorrow_data,
|
has_tomorrow_data=has_tomorrow_data,
|
||||||
current_time=current_time,
|
current_time=current_time,
|
||||||
|
|
@ -702,33 +813,29 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
||||||
await self._repair_manager.clear_rate_limit_tracking()
|
await self._repair_manager.clear_rate_limit_tracking()
|
||||||
|
|
||||||
async def load_cache(self) -> None:
|
async def load_cache(self) -> None:
|
||||||
"""Load cached data from storage."""
|
"""Load cached user data from storage (price data is in IntervalPool)."""
|
||||||
await self._data_fetcher.load_cache()
|
await self._price_data_manager.load_cache()
|
||||||
# Sync legacy references
|
# Sync user data reference
|
||||||
self._cached_price_data = self._data_fetcher.cached_price_data
|
self._cached_user_data = self._price_data_manager.cached_user_data
|
||||||
self._cached_user_data = self._data_fetcher.cached_user_data
|
self._last_user_update = self._price_data_manager._last_user_update # noqa: SLF001 - Sync for lifecycle tracking
|
||||||
self._last_price_update = self._data_fetcher._last_price_update # noqa: SLF001 - Sync for lifecycle tracking
|
|
||||||
self._last_user_update = self._data_fetcher._last_user_update # noqa: SLF001 - Sync for lifecycle tracking
|
|
||||||
|
|
||||||
# CRITICAL: Restore midnight handler state from cache
|
# Note: Midnight handler state is now based on current date
|
||||||
# If cache is from today, assume turnover already happened at midnight
|
# Since price data is in IntervalPool (persistent), we just need to
|
||||||
# This allows proper turnover detection after HA restart
|
# ensure turnover doesn't happen twice if HA restarts after midnight
|
||||||
if self._last_price_update:
|
today_midnight = self.time.as_local(self.time.now()).replace(hour=0, minute=0, second=0, microsecond=0)
|
||||||
cache_date = self.time.as_local(self._last_price_update).date()
|
# Mark today's midnight as done to prevent double turnover on HA restart
|
||||||
today_date = self.time.as_local(self.time.now()).date()
|
self._midnight_handler.mark_turnover_done(today_midnight)
|
||||||
if cache_date == today_date:
|
|
||||||
# Cache is from today, so midnight turnover already happened
|
|
||||||
today_midnight = self.time.as_local(self.time.now()).replace(hour=0, minute=0, second=0, microsecond=0)
|
|
||||||
# Restore handler state: mark today's midnight as last turnover
|
|
||||||
self._midnight_handler.mark_turnover_done(today_midnight)
|
|
||||||
|
|
||||||
async def _store_cache(self) -> None:
|
async def _store_cache(self) -> None:
|
||||||
"""Store cache data."""
|
"""Store cache data (user metadata only, price data is in IntervalPool)."""
|
||||||
await self._data_fetcher.store_cache(self._midnight_handler.last_check_time)
|
await self._price_data_manager.store_cache(self._midnight_handler.last_check_time)
|
||||||
|
|
||||||
def _needs_tomorrow_data(self) -> bool:
|
def _needs_tomorrow_data(self) -> bool:
|
||||||
"""Check if tomorrow data is missing or invalid."""
|
"""Check if tomorrow data is missing or invalid."""
|
||||||
return helpers.needs_tomorrow_data(self._cached_price_data)
|
# Check self.data (from Pool) instead of _cached_price_data
|
||||||
|
if not self.data or "priceInfo" not in self.data:
|
||||||
|
return True
|
||||||
|
return helpers.needs_tomorrow_data({"price_info": self.data["priceInfo"]})
|
||||||
|
|
||||||
def _has_valid_tomorrow_data(self) -> bool:
|
def _has_valid_tomorrow_data(self) -> bool:
|
||||||
"""Check if we have valid tomorrow data (inverse of _needs_tomorrow_data)."""
|
"""Check if we have valid tomorrow data (inverse of _needs_tomorrow_data)."""
|
||||||
|
|
@ -736,12 +843,12 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
||||||
|
|
||||||
@callback
|
@callback
|
||||||
def _merge_cached_data(self) -> dict[str, Any]:
|
def _merge_cached_data(self) -> dict[str, Any]:
|
||||||
"""Merge cached data into the expected format for main entry."""
|
"""Return current data (from Pool)."""
|
||||||
if not self._cached_price_data:
|
if not self.data:
|
||||||
return {}
|
return {}
|
||||||
return self._transform_data(self._cached_price_data)
|
return self.data
|
||||||
|
|
||||||
def _get_threshold_percentages(self) -> dict[str, int]:
|
def _get_threshold_percentages(self) -> dict[str, int | float]:
|
||||||
"""Get threshold percentages from config options."""
|
"""Get threshold percentages from config options."""
|
||||||
return self._data_transformer.get_threshold_percentages()
|
return self._data_transformer.get_threshold_percentages()
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,502 +0,0 @@
|
||||||
"""Data fetching logic for the coordinator."""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
import asyncio
|
|
||||||
import logging
|
|
||||||
import secrets
|
|
||||||
from datetime import timedelta
|
|
||||||
from typing import TYPE_CHECKING, Any
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.api import (
|
|
||||||
TibberPricesApiClientAuthenticationError,
|
|
||||||
TibberPricesApiClientCommunicationError,
|
|
||||||
TibberPricesApiClientError,
|
|
||||||
)
|
|
||||||
from homeassistant.core import callback
|
|
||||||
from homeassistant.exceptions import ConfigEntryAuthFailed
|
|
||||||
from homeassistant.helpers.update_coordinator import UpdateFailed
|
|
||||||
|
|
||||||
from . import cache, helpers
|
|
||||||
from .constants import TOMORROW_DATA_CHECK_HOUR, TOMORROW_DATA_RANDOM_DELAY_MAX
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
from collections.abc import Callable
|
|
||||||
from datetime import datetime
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.api import TibberPricesApiClient
|
|
||||||
|
|
||||||
from .time_service import TibberPricesTimeService
|
|
||||||
|
|
||||||
_LOGGER = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class TibberPricesDataFetcher:
|
|
||||||
"""Handles data fetching, caching, and main/subentry coordination."""
|
|
||||||
|
|
||||||
def __init__( # noqa: PLR0913
|
|
||||||
self,
|
|
||||||
api: TibberPricesApiClient,
|
|
||||||
store: Any,
|
|
||||||
log_prefix: str,
|
|
||||||
user_update_interval: timedelta,
|
|
||||||
time: TibberPricesTimeService,
|
|
||||||
home_id: str,
|
|
||||||
) -> None:
|
|
||||||
"""Initialize the data fetcher."""
|
|
||||||
self.api = api
|
|
||||||
self._store = store
|
|
||||||
self._log_prefix = log_prefix
|
|
||||||
self._user_update_interval = user_update_interval
|
|
||||||
self.time: TibberPricesTimeService = time
|
|
||||||
self.home_id = home_id
|
|
||||||
|
|
||||||
# Cached data
|
|
||||||
self._cached_price_data: dict[str, Any] | None = None
|
|
||||||
self._cached_user_data: dict[str, Any] | None = None
|
|
||||||
self._last_price_update: datetime | None = None
|
|
||||||
self._last_user_update: datetime | None = None
|
|
||||||
|
|
||||||
def _log(self, level: str, message: str, *args: object, **kwargs: object) -> None:
|
|
||||||
"""Log with coordinator-specific prefix."""
|
|
||||||
prefixed_message = f"{self._log_prefix} {message}"
|
|
||||||
getattr(_LOGGER, level)(prefixed_message, *args, **kwargs)
|
|
||||||
|
|
||||||
async def load_cache(self) -> None:
|
|
||||||
"""Load cached data from storage."""
|
|
||||||
cache_data = await cache.load_cache(self._store, self._log_prefix, time=self.time)
|
|
||||||
|
|
||||||
self._cached_price_data = cache_data.price_data
|
|
||||||
self._cached_user_data = cache_data.user_data
|
|
||||||
self._last_price_update = cache_data.last_price_update
|
|
||||||
self._last_user_update = cache_data.last_user_update
|
|
||||||
|
|
||||||
# Parse timestamps if we loaded price data from cache
|
|
||||||
if self._cached_price_data:
|
|
||||||
self._cached_price_data = helpers.parse_all_timestamps(self._cached_price_data, time=self.time)
|
|
||||||
|
|
||||||
# Validate cache: check if price data is from a previous day
|
|
||||||
if not cache.is_cache_valid(cache_data, self._log_prefix, time=self.time):
|
|
||||||
self._log("info", "Cached price data is from a previous day, clearing cache to fetch fresh data")
|
|
||||||
self._cached_price_data = None
|
|
||||||
self._last_price_update = None
|
|
||||||
await self.store_cache()
|
|
||||||
|
|
||||||
async def store_cache(self, last_midnight_check: datetime | None = None) -> None:
|
|
||||||
"""Store cache data."""
|
|
||||||
cache_data = cache.TibberPricesCacheData(
|
|
||||||
price_data=self._cached_price_data,
|
|
||||||
user_data=self._cached_user_data,
|
|
||||||
last_price_update=self._last_price_update,
|
|
||||||
last_user_update=self._last_user_update,
|
|
||||||
last_midnight_check=last_midnight_check,
|
|
||||||
)
|
|
||||||
await cache.save_cache(self._store, cache_data, self._log_prefix)
|
|
||||||
|
|
||||||
def _validate_user_data(self, user_data: dict, home_id: str) -> bool: # noqa: PLR0911
|
|
||||||
"""
|
|
||||||
Validate user data completeness.
|
|
||||||
|
|
||||||
Rejects incomplete/invalid data from API to prevent caching temporary errors.
|
|
||||||
Currency information is critical - if missing, we cannot safely calculate prices.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
user_data: User data dict from API.
|
|
||||||
home_id: Home ID to validate against.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
True if data is valid and complete, False otherwise.
|
|
||||||
|
|
||||||
"""
|
|
||||||
if not user_data:
|
|
||||||
self._log("warning", "User data validation failed: Empty data")
|
|
||||||
return False
|
|
||||||
|
|
||||||
viewer = user_data.get("viewer")
|
|
||||||
if not viewer or not isinstance(viewer, dict):
|
|
||||||
self._log("warning", "User data validation failed: Missing or invalid viewer")
|
|
||||||
return False
|
|
||||||
|
|
||||||
homes = viewer.get("homes")
|
|
||||||
if not homes or not isinstance(homes, list) or len(homes) == 0:
|
|
||||||
self._log("warning", "User data validation failed: No homes found")
|
|
||||||
return False
|
|
||||||
|
|
||||||
# Find our home and validate it has required data
|
|
||||||
home_found = False
|
|
||||||
for home in homes:
|
|
||||||
if home.get("id") == home_id:
|
|
||||||
home_found = True
|
|
||||||
|
|
||||||
# Validate home has timezone (required for cursor calculation)
|
|
||||||
if not home.get("timeZone"):
|
|
||||||
self._log("warning", "User data validation failed: Home %s missing timezone", home_id)
|
|
||||||
return False
|
|
||||||
|
|
||||||
# Currency is critical - if home has subscription, must have currency
|
|
||||||
subscription = home.get("currentSubscription")
|
|
||||||
if subscription and subscription is not None:
|
|
||||||
price_info = subscription.get("priceInfo")
|
|
||||||
if price_info and price_info is not None:
|
|
||||||
current = price_info.get("current")
|
|
||||||
if current and current is not None:
|
|
||||||
currency = current.get("currency")
|
|
||||||
if not currency:
|
|
||||||
self._log(
|
|
||||||
"warning",
|
|
||||||
"User data validation failed: Home %s has subscription but no currency",
|
|
||||||
home_id,
|
|
||||||
)
|
|
||||||
return False
|
|
||||||
|
|
||||||
break
|
|
||||||
|
|
||||||
if not home_found:
|
|
||||||
self._log("warning", "User data validation failed: Home %s not found in homes list", home_id)
|
|
||||||
return False
|
|
||||||
|
|
||||||
self._log("debug", "User data validation passed for home %s", home_id)
|
|
||||||
return True
|
|
||||||
|
|
||||||
async def update_user_data_if_needed(self, current_time: datetime) -> bool:
|
|
||||||
"""
|
|
||||||
Update user data if needed (daily check).
|
|
||||||
|
|
||||||
Only accepts complete and valid data. If API returns incomplete data
|
|
||||||
(e.g., during maintenance), keeps existing cached data and retries later.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
True if user data was updated, False otherwise
|
|
||||||
|
|
||||||
"""
|
|
||||||
if self._last_user_update is None or current_time - self._last_user_update >= self._user_update_interval:
|
|
||||||
try:
|
|
||||||
self._log("debug", "Updating user data")
|
|
||||||
user_data = await self.api.async_get_viewer_details()
|
|
||||||
|
|
||||||
# Validate before caching
|
|
||||||
if not self._validate_user_data(user_data, self.home_id):
|
|
||||||
self._log(
|
|
||||||
"warning",
|
|
||||||
"Rejecting incomplete user data from API - keeping existing cached data",
|
|
||||||
)
|
|
||||||
return False # Keep existing data, don't update timestamp
|
|
||||||
|
|
||||||
# Data is valid, cache it
|
|
||||||
self._cached_user_data = user_data
|
|
||||||
self._last_user_update = current_time
|
|
||||||
self._log("debug", "User data updated successfully")
|
|
||||||
except (
|
|
||||||
TibberPricesApiClientError,
|
|
||||||
TibberPricesApiClientCommunicationError,
|
|
||||||
) as ex:
|
|
||||||
self._log("warning", "Failed to update user data: %s", ex)
|
|
||||||
return False # Update failed
|
|
||||||
else:
|
|
||||||
return True # User data was updated
|
|
||||||
return False # No update needed
|
|
||||||
|
|
||||||
@callback
|
|
||||||
def should_update_price_data(self, current_time: datetime) -> bool | str:
|
|
||||||
"""
|
|
||||||
Check if price data should be updated from the API.
|
|
||||||
|
|
||||||
API calls only happen when truly needed:
|
|
||||||
1. No cached data exists
|
|
||||||
2. Cache is invalid (from previous day - detected by _is_cache_valid)
|
|
||||||
3. After 13:00 local time and tomorrow's data is missing or invalid
|
|
||||||
|
|
||||||
Cache validity is ensured by:
|
|
||||||
- _is_cache_valid() checks date mismatch on load
|
|
||||||
- Midnight turnover clears cache (Timer #2)
|
|
||||||
- Tomorrow data validation after 13:00
|
|
||||||
|
|
||||||
No periodic "safety" updates - trust the cache validation!
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
bool or str: True for immediate update, "tomorrow_check" for tomorrow
|
|
||||||
data check (needs random delay), False for no update
|
|
||||||
|
|
||||||
"""
|
|
||||||
if self._cached_price_data is None:
|
|
||||||
self._log("debug", "API update needed: No cached price data")
|
|
||||||
return True
|
|
||||||
if self._last_price_update is None:
|
|
||||||
self._log("debug", "API update needed: No last price update timestamp")
|
|
||||||
return True
|
|
||||||
|
|
||||||
# Check if after 13:00 and tomorrow data is missing or invalid
|
|
||||||
now_local = self.time.as_local(current_time)
|
|
||||||
if now_local.hour >= TOMORROW_DATA_CHECK_HOUR and self._cached_price_data and self.needs_tomorrow_data():
|
|
||||||
self._log(
|
|
||||||
"info",
|
|
||||||
"API update needed: After %s:00 and tomorrow's data missing/invalid",
|
|
||||||
TOMORROW_DATA_CHECK_HOUR,
|
|
||||||
)
|
|
||||||
# Return special marker to indicate this is a tomorrow data check
|
|
||||||
# Caller should add random delay to spread load
|
|
||||||
return "tomorrow_check"
|
|
||||||
|
|
||||||
# No update needed - cache is valid and complete
|
|
||||||
self._log("debug", "No API update needed: Cache is valid and complete")
|
|
||||||
return False
|
|
||||||
|
|
||||||
def needs_tomorrow_data(self) -> bool:
|
|
||||||
"""Check if tomorrow data is missing or invalid."""
|
|
||||||
return helpers.needs_tomorrow_data(self._cached_price_data)
|
|
||||||
|
|
||||||
async def fetch_home_data(self, home_id: str, current_time: datetime) -> dict[str, Any]:
|
|
||||||
"""Fetch data for a single home."""
|
|
||||||
if not home_id:
|
|
||||||
self._log("warning", "No home ID provided - cannot fetch price data")
|
|
||||||
return {
|
|
||||||
"timestamp": current_time,
|
|
||||||
"home_id": "",
|
|
||||||
"price_info": [],
|
|
||||||
"currency": "EUR",
|
|
||||||
}
|
|
||||||
|
|
||||||
# Ensure we have user_data before fetching price data
|
|
||||||
# This is critical for timezone-aware cursor calculation
|
|
||||||
if not self._cached_user_data:
|
|
||||||
self._log("info", "User data not cached, fetching before price data")
|
|
||||||
try:
|
|
||||||
user_data = await self.api.async_get_viewer_details()
|
|
||||||
|
|
||||||
# Validate data before accepting it (especially on initial setup)
|
|
||||||
if not self._validate_user_data(user_data, self.home_id):
|
|
||||||
msg = "Received incomplete user data from API - cannot proceed with price fetching"
|
|
||||||
self._log("error", msg)
|
|
||||||
raise TibberPricesApiClientError(msg) # noqa: TRY301
|
|
||||||
|
|
||||||
self._cached_user_data = user_data
|
|
||||||
self._last_user_update = current_time
|
|
||||||
except (
|
|
||||||
TibberPricesApiClientError,
|
|
||||||
TibberPricesApiClientCommunicationError,
|
|
||||||
) as ex:
|
|
||||||
msg = f"Failed to fetch user data (required for price fetching): {ex}"
|
|
||||||
self._log("error", msg)
|
|
||||||
raise TibberPricesApiClientError(msg) from ex
|
|
||||||
|
|
||||||
# Get price data for this home
|
|
||||||
# Pass user_data for timezone-aware cursor calculation
|
|
||||||
# At this point, _cached_user_data is guaranteed to be not None (checked above)
|
|
||||||
if not self._cached_user_data:
|
|
||||||
msg = "User data unexpectedly None after fetch attempt"
|
|
||||||
raise TibberPricesApiClientError(msg)
|
|
||||||
|
|
||||||
self._log("debug", "Fetching price data for home %s", home_id)
|
|
||||||
home_data = await self.api.async_get_price_info(
|
|
||||||
home_id=home_id,
|
|
||||||
user_data=self._cached_user_data,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Extract currency for this home from user_data
|
|
||||||
currency = self._get_currency_for_home(home_id)
|
|
||||||
|
|
||||||
price_info = home_data.get("price_info", [])
|
|
||||||
|
|
||||||
self._log("debug", "Successfully fetched data for home %s (%d intervals)", home_id, len(price_info))
|
|
||||||
|
|
||||||
return {
|
|
||||||
"timestamp": current_time,
|
|
||||||
"home_id": home_id,
|
|
||||||
"price_info": price_info,
|
|
||||||
"currency": currency,
|
|
||||||
}
|
|
||||||
|
|
||||||
def _get_currency_for_home(self, home_id: str) -> str:
|
|
||||||
"""
|
|
||||||
Get currency for a specific home from cached user_data.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Currency code (e.g., "EUR", "NOK", "SEK").
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
TibberPricesApiClientError: If currency cannot be determined.
|
|
||||||
|
|
||||||
"""
|
|
||||||
if not self._cached_user_data:
|
|
||||||
msg = "No user data cached - cannot determine currency"
|
|
||||||
self._log("error", msg)
|
|
||||||
raise TibberPricesApiClientError(msg)
|
|
||||||
|
|
||||||
viewer = self._cached_user_data.get("viewer", {})
|
|
||||||
homes = viewer.get("homes", [])
|
|
||||||
|
|
||||||
for home in homes:
|
|
||||||
if home.get("id") == home_id:
|
|
||||||
# Extract currency from nested structure
|
|
||||||
# Use 'or {}' to handle None values (homes without active subscription)
|
|
||||||
subscription = home.get("currentSubscription") or {}
|
|
||||||
price_info = subscription.get("priceInfo") or {}
|
|
||||||
current = price_info.get("current") or {}
|
|
||||||
currency = current.get("currency")
|
|
||||||
|
|
||||||
if not currency:
|
|
||||||
# Home without active subscription - cannot determine currency
|
|
||||||
msg = f"Home {home_id} has no active subscription - currency unavailable"
|
|
||||||
self._log("error", msg)
|
|
||||||
raise TibberPricesApiClientError(msg)
|
|
||||||
|
|
||||||
self._log("debug", "Extracted currency %s for home %s", currency, home_id)
|
|
||||||
return currency
|
|
||||||
|
|
||||||
# Home not found in cached data - data validation should have caught this
|
|
||||||
msg = f"Home {home_id} not found in user data - data validation failed"
|
|
||||||
self._log("error", msg)
|
|
||||||
raise TibberPricesApiClientError(msg)
|
|
||||||
|
|
||||||
def _check_home_exists(self, home_id: str) -> bool:
|
|
||||||
"""
|
|
||||||
Check if a home ID exists in cached user data.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
home_id: The home ID to check.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
True if home exists, False otherwise.
|
|
||||||
|
|
||||||
"""
|
|
||||||
if not self._cached_user_data:
|
|
||||||
# No user data yet - assume home exists (will be checked on next update)
|
|
||||||
return True
|
|
||||||
|
|
||||||
viewer = self._cached_user_data.get("viewer", {})
|
|
||||||
homes = viewer.get("homes", [])
|
|
||||||
|
|
||||||
return any(home.get("id") == home_id for home in homes)
|
|
||||||
|
|
||||||
async def handle_main_entry_update(
|
|
||||||
self,
|
|
||||||
current_time: datetime,
|
|
||||||
home_id: str,
|
|
||||||
transform_fn: Callable[[dict[str, Any]], dict[str, Any]],
|
|
||||||
) -> dict[str, Any]:
|
|
||||||
"""Handle update for main entry - fetch data for this home."""
|
|
||||||
# Update user data if needed (daily check)
|
|
||||||
user_data_updated = await self.update_user_data_if_needed(current_time)
|
|
||||||
|
|
||||||
# Check if this home still exists in user data after update
|
|
||||||
# This detects when a home was removed from the Tibber account
|
|
||||||
home_exists = self._check_home_exists(home_id)
|
|
||||||
if not home_exists:
|
|
||||||
self._log("warning", "Home ID %s not found in Tibber account", home_id)
|
|
||||||
# Return a special marker in the result that coordinator can check
|
|
||||||
# We still need to return valid data to avoid coordinator errors
|
|
||||||
result = transform_fn(self._cached_price_data or {})
|
|
||||||
result["_home_not_found"] = True # Special marker for coordinator
|
|
||||||
return result
|
|
||||||
|
|
||||||
# Check if we need to update price data
|
|
||||||
should_update = self.should_update_price_data(current_time)
|
|
||||||
|
|
||||||
if should_update:
|
|
||||||
# If this is a tomorrow data check, add random delay to spread API load
|
|
||||||
if should_update == "tomorrow_check":
|
|
||||||
# Use secrets for better randomness distribution
|
|
||||||
delay = secrets.randbelow(TOMORROW_DATA_RANDOM_DELAY_MAX + 1)
|
|
||||||
self._log(
|
|
||||||
"debug",
|
|
||||||
"Tomorrow data check - adding random delay of %d seconds to spread load",
|
|
||||||
delay,
|
|
||||||
)
|
|
||||||
await asyncio.sleep(delay)
|
|
||||||
|
|
||||||
self._log("debug", "Fetching fresh price data from API")
|
|
||||||
raw_data = await self.fetch_home_data(home_id, current_time)
|
|
||||||
# Parse timestamps immediately after API fetch
|
|
||||||
raw_data = helpers.parse_all_timestamps(raw_data, time=self.time)
|
|
||||||
# Cache the data (now with datetime objects)
|
|
||||||
self._cached_price_data = raw_data
|
|
||||||
self._last_price_update = current_time
|
|
||||||
await self.store_cache()
|
|
||||||
# Transform for main entry
|
|
||||||
return transform_fn(raw_data)
|
|
||||||
|
|
||||||
# Use cached data if available
|
|
||||||
if self._cached_price_data is not None:
|
|
||||||
# If user data was updated, we need to return transformed data to trigger entity updates
|
|
||||||
# This ensures diagnostic sensors (home_type, grid_company, etc.) get refreshed
|
|
||||||
if user_data_updated:
|
|
||||||
self._log("debug", "User data updated - returning transformed data to update diagnostic sensors")
|
|
||||||
else:
|
|
||||||
self._log("debug", "Using cached price data (no API call needed)")
|
|
||||||
return transform_fn(self._cached_price_data)
|
|
||||||
|
|
||||||
# Fallback: no cache and no update needed (shouldn't happen)
|
|
||||||
self._log("warning", "No cached data available and update not triggered - returning empty data")
|
|
||||||
return {
|
|
||||||
"timestamp": current_time,
|
|
||||||
"home_id": home_id,
|
|
||||||
"priceInfo": [],
|
|
||||||
"currency": "",
|
|
||||||
}
|
|
||||||
|
|
||||||
async def handle_api_error(
|
|
||||||
self,
|
|
||||||
error: Exception,
|
|
||||||
transform_fn: Callable[[dict[str, Any]], dict[str, Any]],
|
|
||||||
) -> dict[str, Any]:
|
|
||||||
"""Handle API errors with fallback to cached data."""
|
|
||||||
if isinstance(error, TibberPricesApiClientAuthenticationError):
|
|
||||||
msg = "Invalid access token"
|
|
||||||
raise ConfigEntryAuthFailed(msg) from error
|
|
||||||
|
|
||||||
# Use cached data as fallback if available
|
|
||||||
if self._cached_price_data is not None:
|
|
||||||
self._log("warning", "API error, using cached data: %s", error)
|
|
||||||
return transform_fn(self._cached_price_data)
|
|
||||||
|
|
||||||
msg = f"Error communicating with API: {error}"
|
|
||||||
raise UpdateFailed(msg) from error
|
|
||||||
|
|
||||||
@property
|
|
||||||
def cached_price_data(self) -> dict[str, Any] | None:
|
|
||||||
"""Get cached price data."""
|
|
||||||
return self._cached_price_data
|
|
||||||
|
|
||||||
@cached_price_data.setter
|
|
||||||
def cached_price_data(self, value: dict[str, Any] | None) -> None:
|
|
||||||
"""Set cached price data."""
|
|
||||||
self._cached_price_data = value
|
|
||||||
|
|
||||||
@property
|
|
||||||
def cached_user_data(self) -> dict[str, Any] | None:
|
|
||||||
"""Get cached user data."""
|
|
||||||
return self._cached_user_data
|
|
||||||
|
|
||||||
def has_tomorrow_data(self, price_info: list[dict[str, Any]]) -> bool:
|
|
||||||
"""
|
|
||||||
Check if tomorrow's price data is available.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
price_info: List of price intervals from coordinator data.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
True if at least one interval from tomorrow is present.
|
|
||||||
|
|
||||||
"""
|
|
||||||
if not price_info:
|
|
||||||
return False
|
|
||||||
|
|
||||||
# Get tomorrow's date
|
|
||||||
now = self.time.now()
|
|
||||||
tomorrow = (self.time.as_local(now) + timedelta(days=1)).date()
|
|
||||||
|
|
||||||
# Check if any interval is from tomorrow
|
|
||||||
for interval in price_info:
|
|
||||||
if "startsAt" not in interval:
|
|
||||||
continue
|
|
||||||
|
|
||||||
# startsAt is already a datetime object after _transform_data()
|
|
||||||
interval_time = interval["startsAt"]
|
|
||||||
if isinstance(interval_time, str):
|
|
||||||
# Fallback: parse if still string (shouldn't happen with transformed data)
|
|
||||||
interval_time = self.time.parse_datetime(interval_time)
|
|
||||||
|
|
||||||
if interval_time and self.time.as_local(interval_time).date() == tomorrow:
|
|
||||||
return True
|
|
||||||
|
|
||||||
return False
|
|
||||||
|
|
@ -2,6 +2,7 @@
|
||||||
|
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import copy
|
||||||
import logging
|
import logging
|
||||||
from typing import TYPE_CHECKING, Any
|
from typing import TYPE_CHECKING, Any
|
||||||
|
|
||||||
|
|
@ -48,19 +49,50 @@ class TibberPricesDataTransformer:
|
||||||
prefixed_message = f"{self._log_prefix} {message}"
|
prefixed_message = f"{self._log_prefix} {message}"
|
||||||
getattr(_LOGGER, level)(prefixed_message, *args, **kwargs)
|
getattr(_LOGGER, level)(prefixed_message, *args, **kwargs)
|
||||||
|
|
||||||
def get_threshold_percentages(self) -> dict[str, int]:
|
def get_threshold_percentages(self) -> dict[str, int | float]:
|
||||||
"""Get threshold percentages from config options."""
|
"""
|
||||||
|
Get threshold percentages, hysteresis and gap tolerance for RATING_LEVEL from config options.
|
||||||
|
|
||||||
|
CRITICAL: This function is ONLY for rating_level (internal calculation: LOW/NORMAL/HIGH).
|
||||||
|
Do NOT use for price level (Tibber API: VERY_CHEAP/CHEAP/NORMAL/EXPENSIVE/VERY_EXPENSIVE).
|
||||||
|
"""
|
||||||
options = self.config_entry.options or {}
|
options = self.config_entry.options or {}
|
||||||
return {
|
return {
|
||||||
"low": options.get(_const.CONF_PRICE_RATING_THRESHOLD_LOW, _const.DEFAULT_PRICE_RATING_THRESHOLD_LOW),
|
"low": options.get(_const.CONF_PRICE_RATING_THRESHOLD_LOW, _const.DEFAULT_PRICE_RATING_THRESHOLD_LOW),
|
||||||
"high": options.get(_const.CONF_PRICE_RATING_THRESHOLD_HIGH, _const.DEFAULT_PRICE_RATING_THRESHOLD_HIGH),
|
"high": options.get(_const.CONF_PRICE_RATING_THRESHOLD_HIGH, _const.DEFAULT_PRICE_RATING_THRESHOLD_HIGH),
|
||||||
|
"hysteresis": options.get(_const.CONF_PRICE_RATING_HYSTERESIS, _const.DEFAULT_PRICE_RATING_HYSTERESIS),
|
||||||
|
"gap_tolerance": options.get(
|
||||||
|
_const.CONF_PRICE_RATING_GAP_TOLERANCE, _const.DEFAULT_PRICE_RATING_GAP_TOLERANCE
|
||||||
|
),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
def get_level_gap_tolerance(self) -> int:
|
||||||
|
"""
|
||||||
|
Get gap tolerance for PRICE LEVEL (Tibber API) from config options.
|
||||||
|
|
||||||
|
CRITICAL: This is separate from rating_level gap tolerance.
|
||||||
|
Price level comes from Tibber API (VERY_CHEAP/CHEAP/NORMAL/EXPENSIVE/VERY_EXPENSIVE).
|
||||||
|
Rating level is calculated internally (LOW/NORMAL/HIGH).
|
||||||
|
"""
|
||||||
|
options = self.config_entry.options or {}
|
||||||
|
return options.get(_const.CONF_PRICE_LEVEL_GAP_TOLERANCE, _const.DEFAULT_PRICE_LEVEL_GAP_TOLERANCE)
|
||||||
|
|
||||||
def invalidate_config_cache(self) -> None:
|
def invalidate_config_cache(self) -> None:
|
||||||
"""Invalidate config cache when options change."""
|
"""
|
||||||
|
Invalidate config cache AND transformation cache when options change.
|
||||||
|
|
||||||
|
CRITICAL: When options like gap_tolerance, hysteresis, or price_level_gap_tolerance
|
||||||
|
change, we must clear BOTH caches:
|
||||||
|
1. Config cache (_config_cache) - forces config rebuild on next check
|
||||||
|
2. Transformation cache (_cached_transformed_data) - forces data re-enrichment
|
||||||
|
|
||||||
|
This ensures that the next call to transform_data() will re-calculate
|
||||||
|
rating_levels and apply new gap tolerance settings to existing price data.
|
||||||
|
"""
|
||||||
self._config_cache_valid = False
|
self._config_cache_valid = False
|
||||||
self._config_cache = None
|
self._config_cache = None
|
||||||
self._log("debug", "Config cache invalidated")
|
self._cached_transformed_data = None # Force re-transformation with new config
|
||||||
|
self._last_transformation_config = None # Force config comparison to trigger
|
||||||
|
|
||||||
def _get_current_transformation_config(self) -> dict[str, Any]:
|
def _get_current_transformation_config(self) -> dict[str, Any]:
|
||||||
"""
|
"""
|
||||||
|
|
@ -85,6 +117,7 @@ class TibberPricesDataTransformer:
|
||||||
|
|
||||||
config = {
|
config = {
|
||||||
"thresholds": self.get_threshold_percentages(),
|
"thresholds": self.get_threshold_percentages(),
|
||||||
|
"level_gap_tolerance": self.get_level_gap_tolerance(), # Separate: Tibber's price level smoothing
|
||||||
# Volatility thresholds now flat (single-section step)
|
# Volatility thresholds now flat (single-section step)
|
||||||
"volatility_thresholds": {
|
"volatility_thresholds": {
|
||||||
"moderate": options.get(_const.CONF_VOLATILITY_THRESHOLD_MODERATE, 15.0),
|
"moderate": options.get(_const.CONF_VOLATILITY_THRESHOLD_MODERATE, 15.0),
|
||||||
|
|
@ -151,8 +184,9 @@ class TibberPricesDataTransformer:
|
||||||
|
|
||||||
# Configuration changed - must retransform
|
# Configuration changed - must retransform
|
||||||
current_config = self._get_current_transformation_config()
|
current_config = self._get_current_transformation_config()
|
||||||
if current_config != self._last_transformation_config:
|
config_changed = current_config != self._last_transformation_config
|
||||||
self._log("debug", "Configuration changed, retransforming data")
|
|
||||||
|
if config_changed:
|
||||||
return True
|
return True
|
||||||
|
|
||||||
# Check for midnight turnover
|
# Check for midnight turnover
|
||||||
|
|
@ -177,18 +211,29 @@ class TibberPricesDataTransformer:
|
||||||
source_data_timestamp = raw_data.get("timestamp")
|
source_data_timestamp = raw_data.get("timestamp")
|
||||||
|
|
||||||
# Return cached transformed data if no retransformation needed
|
# Return cached transformed data if no retransformation needed
|
||||||
if (
|
should_retransform = self._should_retransform_data(current_time, source_data_timestamp)
|
||||||
not self._should_retransform_data(current_time, source_data_timestamp)
|
has_cache = self._cached_transformed_data is not None
|
||||||
and self._cached_transformed_data is not None
|
|
||||||
):
|
self._log(
|
||||||
|
"info",
|
||||||
|
"transform_data: should_retransform=%s, has_cache=%s",
|
||||||
|
should_retransform,
|
||||||
|
has_cache,
|
||||||
|
)
|
||||||
|
|
||||||
|
if not should_retransform and has_cache:
|
||||||
self._log("debug", "Using cached transformed data (no transformation needed)")
|
self._log("debug", "Using cached transformed data (no transformation needed)")
|
||||||
return self._cached_transformed_data
|
# has_cache ensures _cached_transformed_data is not None
|
||||||
|
return self._cached_transformed_data # type: ignore[return-value]
|
||||||
|
|
||||||
self._log("debug", "Transforming price data (enrichment + period calculation)")
|
self._log("debug", "Transforming price data (enrichment + period calculation)")
|
||||||
|
|
||||||
# Extract data from single-home structure
|
# Extract data from single-home structure
|
||||||
home_id = raw_data.get("home_id", "")
|
home_id = raw_data.get("home_id", "")
|
||||||
all_intervals = raw_data.get("price_info", [])
|
# CRITICAL: Make a deep copy of intervals to avoid modifying cached raw data
|
||||||
|
# The enrichment function modifies intervals in-place, which would corrupt
|
||||||
|
# the original API data and make re-enrichment with different settings impossible
|
||||||
|
all_intervals = copy.deepcopy(raw_data.get("price_info", []))
|
||||||
currency = raw_data.get("currency", "EUR")
|
currency = raw_data.get("currency", "EUR")
|
||||||
|
|
||||||
if not all_intervals:
|
if not all_intervals:
|
||||||
|
|
@ -205,11 +250,16 @@ class TibberPricesDataTransformer:
|
||||||
|
|
||||||
# Enrich price info dynamically with calculated differences and rating levels
|
# Enrich price info dynamically with calculated differences and rating levels
|
||||||
# (Modifies all_intervals in-place, returns same list)
|
# (Modifies all_intervals in-place, returns same list)
|
||||||
thresholds = self.get_threshold_percentages()
|
thresholds = self.get_threshold_percentages() # Only for rating_level
|
||||||
|
level_gap_tolerance = self.get_level_gap_tolerance() # Separate: for Tibber's price level
|
||||||
|
|
||||||
enriched_intervals = enrich_price_info_with_differences(
|
enriched_intervals = enrich_price_info_with_differences(
|
||||||
all_intervals,
|
all_intervals,
|
||||||
threshold_low=thresholds["low"],
|
threshold_low=thresholds["low"],
|
||||||
threshold_high=thresholds["high"],
|
threshold_high=thresholds["high"],
|
||||||
|
hysteresis=float(thresholds["hysteresis"]),
|
||||||
|
gap_tolerance=int(thresholds["gap_tolerance"]),
|
||||||
|
level_gap_tolerance=level_gap_tolerance,
|
||||||
time=self.time,
|
time=self.time,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -16,8 +16,10 @@ from .period_building import (
|
||||||
add_interval_ends,
|
add_interval_ends,
|
||||||
build_periods,
|
build_periods,
|
||||||
calculate_reference_prices,
|
calculate_reference_prices,
|
||||||
|
extend_periods_across_midnight,
|
||||||
filter_periods_by_end_date,
|
filter_periods_by_end_date,
|
||||||
filter_periods_by_min_length,
|
filter_periods_by_min_length,
|
||||||
|
filter_superseded_periods,
|
||||||
split_intervals_by_day,
|
split_intervals_by_day,
|
||||||
)
|
)
|
||||||
from .period_statistics import (
|
from .period_statistics import (
|
||||||
|
|
@ -188,7 +190,7 @@ def calculate_periods(
|
||||||
# Sensors filter further for today+tomorrow, services can access all cached periods
|
# Sensors filter further for today+tomorrow, services can access all cached periods
|
||||||
raw_periods = filter_periods_by_end_date(raw_periods, time=time)
|
raw_periods = filter_periods_by_end_date(raw_periods, time=time)
|
||||||
|
|
||||||
# Step 8: Extract lightweight period summaries (no full price data)
|
# Step 7: Extract lightweight period summaries (no full price data)
|
||||||
# Note: Periods are filtered by end date to keep yesterday/today/tomorrow.
|
# Note: Periods are filtered by end date to keep yesterday/today/tomorrow.
|
||||||
# This preserves periods that started day-before-yesterday but end yesterday.
|
# This preserves periods that started day-before-yesterday but end yesterday.
|
||||||
thresholds = TibberPricesThresholdConfig(
|
thresholds = TibberPricesThresholdConfig(
|
||||||
|
|
@ -207,6 +209,26 @@ def calculate_periods(
|
||||||
time=time,
|
time=time,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Step 8: Cross-day extension for late-night periods
|
||||||
|
# If a best-price period ends near midnight and tomorrow has continued low prices,
|
||||||
|
# extend the period across midnight to give users the full cheap window
|
||||||
|
period_summaries = extend_periods_across_midnight(
|
||||||
|
period_summaries,
|
||||||
|
all_prices_sorted,
|
||||||
|
price_context,
|
||||||
|
time=time,
|
||||||
|
reverse_sort=reverse_sort,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Step 9: Filter superseded periods
|
||||||
|
# When tomorrow data is available, late-night today periods that were found via
|
||||||
|
# relaxation may be obsolete if tomorrow has significantly better alternatives
|
||||||
|
period_summaries = filter_superseded_periods(
|
||||||
|
period_summaries,
|
||||||
|
time=time,
|
||||||
|
reverse_sort=reverse_sort,
|
||||||
|
)
|
||||||
|
|
||||||
return {
|
return {
|
||||||
"periods": period_summaries, # Lightweight summaries only
|
"periods": period_summaries, # Lightweight summaries only
|
||||||
"metadata": {
|
"metadata": {
|
||||||
|
|
|
||||||
|
|
@ -155,9 +155,12 @@ def check_interval_criteria(
|
||||||
in_flex = price >= flex_threshold
|
in_flex = price >= flex_threshold
|
||||||
else:
|
else:
|
||||||
# Best price: accept prices <= (ref_price + flex_amount)
|
# Best price: accept prices <= (ref_price + flex_amount)
|
||||||
# Prices must be CLOSE TO or AT the minimum
|
# Accept ALL low prices up to the flex threshold, not just those >= minimum
|
||||||
|
# This ensures that if there are multiple low-price intervals, all that meet
|
||||||
|
# the threshold are included, regardless of whether they're before or after
|
||||||
|
# the daily minimum in the chronological sequence.
|
||||||
flex_threshold = criteria.ref_price + flex_amount
|
flex_threshold = criteria.ref_price + flex_amount
|
||||||
in_flex = price >= criteria.ref_price and price <= flex_threshold
|
in_flex = price <= flex_threshold
|
||||||
|
|
||||||
# ============================================================
|
# ============================================================
|
||||||
# MIN_DISTANCE FILTER: Check if price is far enough from average
|
# MIN_DISTANCE FILTER: Check if price is far enough from average
|
||||||
|
|
@ -181,7 +184,7 @@ def check_interval_criteria(
|
||||||
if scale_factor < SCALE_FACTOR_WARNING_THRESHOLD:
|
if scale_factor < SCALE_FACTOR_WARNING_THRESHOLD:
|
||||||
import logging # noqa: PLC0415
|
import logging # noqa: PLC0415
|
||||||
|
|
||||||
_LOGGER = logging.getLogger(__name__) # noqa: N806
|
_LOGGER = logging.getLogger(f"{__name__}.details") # noqa: N806
|
||||||
_LOGGER.debug(
|
_LOGGER.debug(
|
||||||
"High flex %.1f%% detected: Reducing min_distance %.1f%% → %.1f%% (scale %.2f)",
|
"High flex %.1f%% detected: Reducing min_distance %.1f%% → %.1f%% (scale %.2f)",
|
||||||
flex_abs * 100,
|
flex_abs * 100,
|
||||||
|
|
|
||||||
|
|
@ -15,19 +15,34 @@ Uses statistical methods:
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
|
from datetime import datetime
|
||||||
from typing import NamedTuple
|
from typing import NamedTuple
|
||||||
|
|
||||||
|
from custom_components.tibber_prices.utils.price import calculate_coefficient_of_variation
|
||||||
|
|
||||||
_LOGGER = logging.getLogger(__name__)
|
_LOGGER = logging.getLogger(__name__)
|
||||||
_LOGGER_DETAILS = logging.getLogger(__name__ + ".details")
|
_LOGGER_DETAILS = logging.getLogger(__name__ + ".details")
|
||||||
|
|
||||||
# Outlier filtering constants
|
# Outlier filtering constants
|
||||||
MIN_CONTEXT_SIZE = 3 # Minimum intervals needed before/after for analysis
|
MIN_CONTEXT_SIZE = 3 # Minimum intervals needed before/after for analysis
|
||||||
CONFIDENCE_LEVEL = 2.0 # Standard deviations for 95% confidence interval
|
|
||||||
VOLATILITY_THRESHOLD = 0.05 # 5% max relative std dev for zigzag detection
|
VOLATILITY_THRESHOLD = 0.05 # 5% max relative std dev for zigzag detection
|
||||||
SYMMETRY_THRESHOLD = 1.5 # Max std dev difference for symmetric spike
|
SYMMETRY_THRESHOLD = 1.5 # Max std dev difference for symmetric spike
|
||||||
RELATIVE_VOLATILITY_THRESHOLD = 2.0 # Window volatility vs context (cluster detection)
|
RELATIVE_VOLATILITY_THRESHOLD = 2.0 # Window volatility vs context (cluster detection)
|
||||||
ASYMMETRY_TAIL_WINDOW = 6 # Skip asymmetry check for last ~1.5h (6 intervals) of available data
|
ASYMMETRY_TAIL_WINDOW = 6 # Skip asymmetry check for last ~1.5h (6 intervals) of available data
|
||||||
ZIGZAG_TAIL_WINDOW = 6 # Skip zigzag/cluster detection for last ~1.5h (6 intervals)
|
ZIGZAG_TAIL_WINDOW = 6 # Skip zigzag/cluster detection for last ~1.5h (6 intervals)
|
||||||
|
EXTREMES_PROTECTION_TOLERANCE = 0.001 # Protect prices within 0.1% of daily min/max from smoothing
|
||||||
|
|
||||||
|
# Adaptive confidence level constants
|
||||||
|
# Uses coefficient of variation (CV) from utils/price.py for consistency with volatility sensors
|
||||||
|
# On flat days (low CV), we're more conservative (higher confidence = fewer smoothed)
|
||||||
|
# On volatile days (high CV), we're more aggressive (lower confidence = more smoothed)
|
||||||
|
CONFIDENCE_LEVEL_MIN = 1.5 # Minimum confidence (volatile days: smooth more aggressively)
|
||||||
|
CONFIDENCE_LEVEL_MAX = 2.5 # Maximum confidence (flat days: smooth more conservatively)
|
||||||
|
CONFIDENCE_LEVEL_DEFAULT = 2.0 # Default: 95% confidence interval (2 std devs)
|
||||||
|
# CV thresholds for adaptive confidence (align with volatility sensor defaults)
|
||||||
|
# These are in percentage points (e.g., 10.0 = 10% CV)
|
||||||
|
DAILY_CV_LOW = 10.0 # ≤10% CV = flat day (use max confidence)
|
||||||
|
DAILY_CV_HIGH = 30.0 # ≥30% CV = volatile day (use min confidence)
|
||||||
|
|
||||||
# Module-local log indentation (each module starts at level 0)
|
# Module-local log indentation (each module starts at level 0)
|
||||||
INDENT_L0 = "" # All logs in this module (no indentation needed)
|
INDENT_L0 = "" # All logs in this module (no indentation needed)
|
||||||
|
|
@ -233,6 +248,166 @@ def _validate_spike_candidate(
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def _calculate_daily_extremes(intervals: list[dict]) -> dict[str, tuple[float, float]]:
|
||||||
|
"""
|
||||||
|
Calculate daily min/max prices for each day in the interval list.
|
||||||
|
|
||||||
|
These extremes are used to protect reference prices from being smoothed.
|
||||||
|
The daily minimum is the reference for best_price periods, and the daily
|
||||||
|
maximum is the reference for peak_price periods - smoothing these would
|
||||||
|
break period detection.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
intervals: List of price intervals with 'startsAt' and 'total' keys
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict mapping date strings to (min_price, max_price) tuples
|
||||||
|
|
||||||
|
"""
|
||||||
|
daily_prices: dict[str, list[float]] = {}
|
||||||
|
|
||||||
|
for interval in intervals:
|
||||||
|
starts_at = interval.get("startsAt")
|
||||||
|
if starts_at is None:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Handle both datetime objects and ISO strings
|
||||||
|
dt = datetime.fromisoformat(starts_at) if isinstance(starts_at, str) else starts_at
|
||||||
|
|
||||||
|
date_key = dt.strftime("%Y-%m-%d")
|
||||||
|
price = float(interval["total"])
|
||||||
|
daily_prices.setdefault(date_key, []).append(price)
|
||||||
|
|
||||||
|
# Calculate min/max for each day
|
||||||
|
return {date_key: (min(prices), max(prices)) for date_key, prices in daily_prices.items()}
|
||||||
|
|
||||||
|
|
||||||
|
def _calculate_daily_cv(intervals: list[dict]) -> dict[str, float]:
|
||||||
|
"""
|
||||||
|
Calculate daily coefficient of variation (CV) for each day.
|
||||||
|
|
||||||
|
Uses the same CV calculation as volatility sensors for consistency.
|
||||||
|
CV = (std_dev / mean) * 100, expressed as percentage.
|
||||||
|
|
||||||
|
Used to adapt the confidence level for outlier detection:
|
||||||
|
- Flat days (low CV): Higher confidence → fewer false positives
|
||||||
|
- Volatile days (high CV): Lower confidence → catch more real outliers
|
||||||
|
|
||||||
|
Args:
|
||||||
|
intervals: List of price intervals with 'startsAt' and 'total' keys
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict mapping date strings to CV percentage (e.g., 15.0 for 15% CV)
|
||||||
|
|
||||||
|
"""
|
||||||
|
daily_prices: dict[str, list[float]] = {}
|
||||||
|
|
||||||
|
for interval in intervals:
|
||||||
|
starts_at = interval.get("startsAt")
|
||||||
|
if starts_at is None:
|
||||||
|
continue
|
||||||
|
|
||||||
|
dt = datetime.fromisoformat(starts_at) if isinstance(starts_at, str) else starts_at
|
||||||
|
date_key = dt.strftime("%Y-%m-%d")
|
||||||
|
price = float(interval["total"])
|
||||||
|
daily_prices.setdefault(date_key, []).append(price)
|
||||||
|
|
||||||
|
# Calculate CV using the shared function from utils/price.py
|
||||||
|
result = {}
|
||||||
|
for date_key, prices in daily_prices.items():
|
||||||
|
cv = calculate_coefficient_of_variation(prices)
|
||||||
|
result[date_key] = cv if cv is not None else 0.0
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
def _get_adaptive_confidence_level(
|
||||||
|
interval: dict,
|
||||||
|
daily_cv: dict[str, float],
|
||||||
|
) -> float:
|
||||||
|
"""
|
||||||
|
Get adaptive confidence level based on daily coefficient of variation (CV).
|
||||||
|
|
||||||
|
Maps daily CV to confidence level:
|
||||||
|
- Low CV (≤10%): High confidence (2.5) → conservative, fewer smoothed
|
||||||
|
- High CV (≥30%): Low confidence (1.5) → aggressive, more smoothed
|
||||||
|
- Between: Linear interpolation
|
||||||
|
|
||||||
|
Uses the same CV calculation as volatility sensors for consistency.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
interval: Price interval dict with 'startsAt' key
|
||||||
|
daily_cv: Dict from _calculate_daily_cv()
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Confidence level multiplier for std_dev threshold
|
||||||
|
|
||||||
|
"""
|
||||||
|
starts_at = interval.get("startsAt")
|
||||||
|
if starts_at is None:
|
||||||
|
return CONFIDENCE_LEVEL_DEFAULT
|
||||||
|
|
||||||
|
dt = datetime.fromisoformat(starts_at) if isinstance(starts_at, str) else starts_at
|
||||||
|
date_key = dt.strftime("%Y-%m-%d")
|
||||||
|
|
||||||
|
cv = daily_cv.get(date_key, 0.0)
|
||||||
|
|
||||||
|
# Linear interpolation between LOW and HIGH CV
|
||||||
|
# Low CV → high confidence (conservative)
|
||||||
|
# High CV → low confidence (aggressive)
|
||||||
|
if cv <= DAILY_CV_LOW:
|
||||||
|
return CONFIDENCE_LEVEL_MAX
|
||||||
|
if cv >= DAILY_CV_HIGH:
|
||||||
|
return CONFIDENCE_LEVEL_MIN
|
||||||
|
|
||||||
|
# Linear interpolation: as CV increases, confidence decreases
|
||||||
|
ratio = (cv - DAILY_CV_LOW) / (DAILY_CV_HIGH - DAILY_CV_LOW)
|
||||||
|
return CONFIDENCE_LEVEL_MAX - (ratio * (CONFIDENCE_LEVEL_MAX - CONFIDENCE_LEVEL_MIN))
|
||||||
|
|
||||||
|
|
||||||
|
def _is_daily_extreme(
|
||||||
|
interval: dict,
|
||||||
|
daily_extremes: dict[str, tuple[float, float]],
|
||||||
|
tolerance: float = EXTREMES_PROTECTION_TOLERANCE,
|
||||||
|
) -> bool:
|
||||||
|
"""
|
||||||
|
Check if an interval's price is at or very near a daily extreme.
|
||||||
|
|
||||||
|
Prices at daily extremes should never be smoothed because:
|
||||||
|
- Daily minimum is the reference for best_price period detection
|
||||||
|
- Daily maximum is the reference for peak_price period detection
|
||||||
|
- Smoothing these would cause periods to miss their most important intervals
|
||||||
|
|
||||||
|
Args:
|
||||||
|
interval: Price interval dict with 'startsAt' and 'total' keys
|
||||||
|
daily_extremes: Dict from _calculate_daily_extremes()
|
||||||
|
tolerance: Relative tolerance for matching (default 0.1%)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if the price is at or very near a daily min or max
|
||||||
|
|
||||||
|
"""
|
||||||
|
starts_at = interval.get("startsAt")
|
||||||
|
if starts_at is None:
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Handle both datetime objects and ISO strings
|
||||||
|
dt = datetime.fromisoformat(starts_at) if isinstance(starts_at, str) else starts_at
|
||||||
|
|
||||||
|
date_key = dt.strftime("%Y-%m-%d")
|
||||||
|
if date_key not in daily_extremes:
|
||||||
|
return False
|
||||||
|
|
||||||
|
price = float(interval["total"])
|
||||||
|
daily_min, daily_max = daily_extremes[date_key]
|
||||||
|
|
||||||
|
# Check if price is within tolerance of daily min or max
|
||||||
|
# Using relative tolerance: |price - extreme| <= extreme * tolerance
|
||||||
|
min_threshold = daily_min * (1 + tolerance)
|
||||||
|
max_threshold = daily_max * (1 - tolerance)
|
||||||
|
|
||||||
|
return price <= min_threshold or price >= max_threshold
|
||||||
|
|
||||||
|
|
||||||
def filter_price_outliers(
|
def filter_price_outliers(
|
||||||
intervals: list[dict],
|
intervals: list[dict],
|
||||||
flexibility_pct: float,
|
flexibility_pct: float,
|
||||||
|
|
@ -260,15 +435,29 @@ def filter_price_outliers(
|
||||||
Intervals with smoothed prices (marked with _smoothed flag)
|
Intervals with smoothed prices (marked with _smoothed flag)
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
# Convert percentage to ratio once for all comparisons (e.g., 15.0 → 0.15)
|
||||||
|
flexibility_ratio = flexibility_pct / 100
|
||||||
|
|
||||||
|
# Calculate daily extremes to protect reference prices from smoothing
|
||||||
|
# Daily min is the reference for best_price, daily max for peak_price
|
||||||
|
daily_extremes = _calculate_daily_extremes(intervals)
|
||||||
|
|
||||||
|
# Calculate daily coefficient of variation (CV) for adaptive confidence levels
|
||||||
|
# Uses same CV calculation as volatility sensors for consistency
|
||||||
|
# Flat days → conservative smoothing, volatile days → aggressive smoothing
|
||||||
|
daily_cv = _calculate_daily_cv(intervals)
|
||||||
|
|
||||||
|
# Log CV info for debugging (CV is in percentage points, e.g., 15.0 = 15%)
|
||||||
|
cv_info = ", ".join(f"{date}: {cv:.1f}%" for date, cv in sorted(daily_cv.items()))
|
||||||
_LOGGER.info(
|
_LOGGER.info(
|
||||||
"%sSmoothing price outliers: %d intervals, flex=%.1f%%",
|
"%sSmoothing price outliers: %d intervals, flex=%.1f%%, daily CV: %s",
|
||||||
INDENT_L0,
|
INDENT_L0,
|
||||||
len(intervals),
|
len(intervals),
|
||||||
flexibility_pct,
|
flexibility_pct,
|
||||||
|
cv_info,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Convert percentage to ratio once for all comparisons (e.g., 15.0 → 0.15)
|
protected_count = 0
|
||||||
flexibility_ratio = flexibility_pct / 100
|
|
||||||
|
|
||||||
result = []
|
result = []
|
||||||
smoothed_count = 0
|
smoothed_count = 0
|
||||||
|
|
@ -276,6 +465,20 @@ def filter_price_outliers(
|
||||||
for i, current in enumerate(intervals):
|
for i, current in enumerate(intervals):
|
||||||
current_price = current["total"]
|
current_price = current["total"]
|
||||||
|
|
||||||
|
# CRITICAL: Never smooth daily extremes - they are the reference prices!
|
||||||
|
# Smoothing the daily min would break best_price period detection,
|
||||||
|
# smoothing the daily max would break peak_price period detection.
|
||||||
|
if _is_daily_extreme(current, daily_extremes):
|
||||||
|
result.append(current)
|
||||||
|
protected_count += 1
|
||||||
|
_LOGGER_DETAILS.debug(
|
||||||
|
"%sProtected daily extreme at %s: %.2f ct/kWh (not smoothed)",
|
||||||
|
INDENT_L0,
|
||||||
|
current.get("startsAt", f"index {i}"),
|
||||||
|
current_price * 100,
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
# Get context windows (3 intervals before and after)
|
# Get context windows (3 intervals before and after)
|
||||||
context_before = intervals[max(0, i - MIN_CONTEXT_SIZE) : i]
|
context_before = intervals[max(0, i - MIN_CONTEXT_SIZE) : i]
|
||||||
context_after = intervals[i + 1 : min(len(intervals), i + 1 + MIN_CONTEXT_SIZE)]
|
context_after = intervals[i + 1 : min(len(intervals), i + 1 + MIN_CONTEXT_SIZE)]
|
||||||
|
|
@ -297,8 +500,11 @@ def filter_price_outliers(
|
||||||
# Calculate how far current price deviates from expected
|
# Calculate how far current price deviates from expected
|
||||||
residual = abs(current_price - expected_price)
|
residual = abs(current_price - expected_price)
|
||||||
|
|
||||||
# Tolerance based on statistical confidence (2 std dev = 95% confidence)
|
# Adaptive confidence level based on daily CV:
|
||||||
tolerance = stats["std_dev"] * CONFIDENCE_LEVEL
|
# - Flat days (low CV): higher confidence (2.5) → fewer false positives
|
||||||
|
# - Volatile days (high CV): lower confidence (1.5) → catch more real spikes
|
||||||
|
confidence_level = _get_adaptive_confidence_level(current, daily_cv)
|
||||||
|
tolerance = stats["std_dev"] * confidence_level
|
||||||
|
|
||||||
# Not a spike if within tolerance
|
# Not a spike if within tolerance
|
||||||
if residual <= tolerance:
|
if residual <= tolerance:
|
||||||
|
|
@ -332,23 +538,22 @@ def filter_price_outliers(
|
||||||
smoothed_count += 1
|
smoothed_count += 1
|
||||||
|
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"%sSmoothed spike at %s: %.2f → %.2f ct/kWh (residual: %.2f, tolerance: %.2f, trend_slope: %.4f)",
|
"%sSmoothed spike at %s: %.2f → %.2f ct/kWh (residual: %.2f, tolerance: %.2f, confidence: %.2f)",
|
||||||
INDENT_L0,
|
INDENT_L0,
|
||||||
current.get("startsAt", f"index {i}"),
|
current.get("startsAt", f"index {i}"),
|
||||||
current_price * 100,
|
current_price * 100,
|
||||||
expected_price * 100,
|
expected_price * 100,
|
||||||
residual * 100,
|
residual * 100,
|
||||||
tolerance * 100,
|
tolerance * 100,
|
||||||
stats["trend_slope"] * 100,
|
confidence_level,
|
||||||
)
|
)
|
||||||
|
|
||||||
if smoothed_count > 0:
|
if smoothed_count > 0 or protected_count > 0:
|
||||||
_LOGGER.info(
|
_LOGGER.info(
|
||||||
"%sPrice outlier smoothing complete: %d/%d intervals smoothed (%.1f%%)",
|
"%sPrice outlier smoothing complete: %d smoothed, %d protected (daily extremes)",
|
||||||
INDENT_L0,
|
INDENT_L0,
|
||||||
smoothed_count,
|
smoothed_count,
|
||||||
len(intervals),
|
protected_count,
|
||||||
(smoothed_count / len(intervals)) * 100,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
return result
|
return result
|
||||||
|
|
|
||||||
|
|
@ -3,13 +3,12 @@
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
|
from datetime import date, datetime, timedelta
|
||||||
from typing import TYPE_CHECKING, Any
|
from typing import TYPE_CHECKING, Any
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import PRICE_LEVEL_MAPPING
|
from custom_components.tibber_prices.const import PRICE_LEVEL_MAPPING
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from datetime import date
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
||||||
|
|
||||||
from .level_filtering import (
|
from .level_filtering import (
|
||||||
|
|
@ -281,3 +280,428 @@ def filter_periods_by_end_date(periods: list[list[dict]], *, time: TibberPricesT
|
||||||
filtered.append(period)
|
filtered.append(period)
|
||||||
|
|
||||||
return filtered
|
return filtered
|
||||||
|
|
||||||
|
|
||||||
|
def _categorize_periods_for_supersession(
|
||||||
|
period_summaries: list[dict],
|
||||||
|
today: date,
|
||||||
|
tomorrow: date,
|
||||||
|
late_hour_threshold: int,
|
||||||
|
early_hour_limit: int,
|
||||||
|
) -> tuple[list[dict], list[dict], list[dict]]:
|
||||||
|
"""Categorize periods into today-late, tomorrow-early, and other."""
|
||||||
|
today_late: list[dict] = []
|
||||||
|
tomorrow_early: list[dict] = []
|
||||||
|
other: list[dict] = []
|
||||||
|
|
||||||
|
for period in period_summaries:
|
||||||
|
period_start = period.get("start")
|
||||||
|
period_end = period.get("end")
|
||||||
|
|
||||||
|
if not period_start or not period_end:
|
||||||
|
other.append(period)
|
||||||
|
# Today late-night periods: START today at or after late_hour_threshold (e.g., 20:00)
|
||||||
|
# Note: period_end could be tomorrow (e.g., 23:30-00:00 spans midnight)
|
||||||
|
elif period_start.date() == today and period_start.hour >= late_hour_threshold:
|
||||||
|
today_late.append(period)
|
||||||
|
# Tomorrow early-morning periods: START tomorrow before early_hour_limit (e.g., 08:00)
|
||||||
|
elif period_start.date() == tomorrow and period_start.hour < early_hour_limit:
|
||||||
|
tomorrow_early.append(period)
|
||||||
|
else:
|
||||||
|
other.append(period)
|
||||||
|
|
||||||
|
return today_late, tomorrow_early, other
|
||||||
|
|
||||||
|
|
||||||
|
def _filter_superseded_today_periods(
|
||||||
|
today_late_periods: list[dict],
|
||||||
|
best_tomorrow: dict,
|
||||||
|
best_tomorrow_price: float,
|
||||||
|
improvement_threshold: float,
|
||||||
|
) -> list[dict]:
|
||||||
|
"""Filter today periods that are superseded by a better tomorrow period."""
|
||||||
|
kept: list[dict] = []
|
||||||
|
|
||||||
|
for today_period in today_late_periods:
|
||||||
|
today_price = today_period.get("price_mean")
|
||||||
|
|
||||||
|
if today_price is None:
|
||||||
|
kept.append(today_period)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Calculate how much better tomorrow is (as percentage)
|
||||||
|
improvement_pct = ((today_price - best_tomorrow_price) / today_price * 100) if today_price > 0 else 0
|
||||||
|
|
||||||
|
_LOGGER.debug(
|
||||||
|
"Supersession check: Today %s-%s (%.4f) vs Tomorrow %s-%s (%.4f) = %.1f%% improvement (threshold: %.1f%%)",
|
||||||
|
today_period["start"].strftime("%H:%M"),
|
||||||
|
today_period["end"].strftime("%H:%M"),
|
||||||
|
today_price,
|
||||||
|
best_tomorrow["start"].strftime("%H:%M"),
|
||||||
|
best_tomorrow["end"].strftime("%H:%M"),
|
||||||
|
best_tomorrow_price,
|
||||||
|
improvement_pct,
|
||||||
|
improvement_threshold,
|
||||||
|
)
|
||||||
|
|
||||||
|
if improvement_pct >= improvement_threshold:
|
||||||
|
_LOGGER.info(
|
||||||
|
"Period superseded: Today %s-%s (%.2f) replaced by Tomorrow %s-%s (%.2f, %.1f%% better)",
|
||||||
|
today_period["start"].strftime("%H:%M"),
|
||||||
|
today_period["end"].strftime("%H:%M"),
|
||||||
|
today_price,
|
||||||
|
best_tomorrow["start"].strftime("%H:%M"),
|
||||||
|
best_tomorrow["end"].strftime("%H:%M"),
|
||||||
|
best_tomorrow_price,
|
||||||
|
improvement_pct,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
kept.append(today_period)
|
||||||
|
|
||||||
|
return kept
|
||||||
|
|
||||||
|
|
||||||
|
def filter_superseded_periods(
|
||||||
|
period_summaries: list[dict],
|
||||||
|
*,
|
||||||
|
time: TibberPricesTimeService,
|
||||||
|
reverse_sort: bool,
|
||||||
|
) -> list[dict]:
|
||||||
|
"""
|
||||||
|
Filter out late-night today periods that are superseded by better tomorrow periods.
|
||||||
|
|
||||||
|
When tomorrow's data becomes available, some late-night periods that were found
|
||||||
|
through relaxation may no longer make sense. If tomorrow has a significantly
|
||||||
|
better period in the early morning, the late-night today period is obsolete.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
- Today 23:30-00:00 at 0.70 kr (found via relaxation, was best available)
|
||||||
|
- Tomorrow 04:00-05:30 at 0.50 kr (much better alternative)
|
||||||
|
→ The today period is superseded and should be filtered out
|
||||||
|
|
||||||
|
This only applies to best-price periods (reverse_sort=False).
|
||||||
|
Peak-price periods are not filtered this way.
|
||||||
|
|
||||||
|
"""
|
||||||
|
from .types import ( # noqa: PLC0415
|
||||||
|
CROSS_DAY_LATE_PERIOD_START_HOUR,
|
||||||
|
CROSS_DAY_MAX_EXTENSION_HOUR,
|
||||||
|
SUPERSESSION_PRICE_IMPROVEMENT_PCT,
|
||||||
|
)
|
||||||
|
|
||||||
|
_LOGGER.debug(
|
||||||
|
"filter_superseded_periods called: %d periods, reverse_sort=%s",
|
||||||
|
len(period_summaries) if period_summaries else 0,
|
||||||
|
reverse_sort,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Only filter for best-price periods
|
||||||
|
if reverse_sort or not period_summaries:
|
||||||
|
return period_summaries
|
||||||
|
|
||||||
|
now = time.now()
|
||||||
|
today = now.date()
|
||||||
|
tomorrow = today + timedelta(days=1)
|
||||||
|
|
||||||
|
# Categorize periods
|
||||||
|
today_late, tomorrow_early, other = _categorize_periods_for_supersession(
|
||||||
|
period_summaries,
|
||||||
|
today,
|
||||||
|
tomorrow,
|
||||||
|
CROSS_DAY_LATE_PERIOD_START_HOUR,
|
||||||
|
CROSS_DAY_MAX_EXTENSION_HOUR,
|
||||||
|
)
|
||||||
|
|
||||||
|
_LOGGER.debug(
|
||||||
|
"Supersession categorization: today_late=%d, tomorrow_early=%d, other=%d",
|
||||||
|
len(today_late),
|
||||||
|
len(tomorrow_early),
|
||||||
|
len(other),
|
||||||
|
)
|
||||||
|
|
||||||
|
# If no tomorrow early periods, nothing to compare against
|
||||||
|
if not tomorrow_early:
|
||||||
|
_LOGGER.debug("No tomorrow early periods - skipping supersession check")
|
||||||
|
return period_summaries
|
||||||
|
|
||||||
|
# Find the best tomorrow early period (lowest mean price)
|
||||||
|
best_tomorrow = min(tomorrow_early, key=lambda p: p.get("price_mean", float("inf")))
|
||||||
|
best_tomorrow_price = best_tomorrow.get("price_mean")
|
||||||
|
|
||||||
|
if best_tomorrow_price is None:
|
||||||
|
return period_summaries
|
||||||
|
|
||||||
|
# Filter superseded today periods
|
||||||
|
kept_today = _filter_superseded_today_periods(
|
||||||
|
today_late,
|
||||||
|
best_tomorrow,
|
||||||
|
best_tomorrow_price,
|
||||||
|
SUPERSESSION_PRICE_IMPROVEMENT_PCT,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Reconstruct and sort by start time
|
||||||
|
result = other + kept_today + tomorrow_early
|
||||||
|
result.sort(key=lambda p: p.get("start") or time.now())
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
def _is_period_eligible_for_extension(
|
||||||
|
period: dict,
|
||||||
|
today: date,
|
||||||
|
late_hour_threshold: int,
|
||||||
|
) -> bool:
|
||||||
|
"""
|
||||||
|
Check if a period is eligible for cross-day extension.
|
||||||
|
|
||||||
|
Eligibility criteria:
|
||||||
|
- Period has valid start and end times
|
||||||
|
- Period ends on today (not yesterday or tomorrow)
|
||||||
|
- Period ends late (after late_hour_threshold, e.g. 20:00)
|
||||||
|
|
||||||
|
"""
|
||||||
|
period_end = period.get("end")
|
||||||
|
period_start = period.get("start")
|
||||||
|
|
||||||
|
if not period_end or not period_start:
|
||||||
|
return False
|
||||||
|
|
||||||
|
if period_end.date() != today:
|
||||||
|
return False
|
||||||
|
|
||||||
|
return period_end.hour >= late_hour_threshold
|
||||||
|
|
||||||
|
|
||||||
|
def _find_extension_intervals(
|
||||||
|
period_end: datetime,
|
||||||
|
price_lookup: dict[str, dict],
|
||||||
|
criteria: Any,
|
||||||
|
max_extension_time: datetime,
|
||||||
|
interval_duration: timedelta,
|
||||||
|
) -> list[dict]:
|
||||||
|
"""
|
||||||
|
Find consecutive intervals after period_end that meet criteria.
|
||||||
|
|
||||||
|
Iterates forward from period_end, adding intervals while they
|
||||||
|
meet the flex and min_distance criteria. Stops at first failure
|
||||||
|
or when reaching max_extension_time.
|
||||||
|
|
||||||
|
"""
|
||||||
|
from .level_filtering import check_interval_criteria # noqa: PLC0415
|
||||||
|
|
||||||
|
extension_intervals: list[dict] = []
|
||||||
|
check_time = period_end
|
||||||
|
|
||||||
|
while check_time < max_extension_time:
|
||||||
|
price_data = price_lookup.get(check_time.isoformat())
|
||||||
|
if not price_data:
|
||||||
|
break # No more data
|
||||||
|
|
||||||
|
price = float(price_data["total"])
|
||||||
|
in_flex, meets_min_distance = check_interval_criteria(price, criteria)
|
||||||
|
|
||||||
|
if not (in_flex and meets_min_distance):
|
||||||
|
break # Criteria no longer met
|
||||||
|
|
||||||
|
extension_intervals.append(price_data)
|
||||||
|
check_time = check_time + interval_duration
|
||||||
|
|
||||||
|
return extension_intervals
|
||||||
|
|
||||||
|
|
||||||
|
def _collect_original_period_prices(
|
||||||
|
period_start: datetime,
|
||||||
|
period_end: datetime,
|
||||||
|
price_lookup: dict[str, dict],
|
||||||
|
interval_duration: timedelta,
|
||||||
|
) -> list[float]:
|
||||||
|
"""Collect prices from original period for CV calculation."""
|
||||||
|
prices: list[float] = []
|
||||||
|
current = period_start
|
||||||
|
while current < period_end:
|
||||||
|
price_data = price_lookup.get(current.isoformat())
|
||||||
|
if price_data:
|
||||||
|
prices.append(float(price_data["total"]))
|
||||||
|
current = current + interval_duration
|
||||||
|
return prices
|
||||||
|
|
||||||
|
|
||||||
|
def _build_extended_period(
|
||||||
|
period: dict,
|
||||||
|
extension_intervals: list[dict],
|
||||||
|
combined_prices: list[float],
|
||||||
|
combined_cv: float,
|
||||||
|
interval_duration: timedelta,
|
||||||
|
) -> dict:
|
||||||
|
"""Create extended period dict with updated statistics."""
|
||||||
|
period_start = period["start"]
|
||||||
|
period_end = period["end"]
|
||||||
|
new_end = period_end + (interval_duration * len(extension_intervals))
|
||||||
|
|
||||||
|
extended = period.copy()
|
||||||
|
extended["end"] = new_end
|
||||||
|
extended["duration_minutes"] = int((new_end - period_start).total_seconds() / 60)
|
||||||
|
extended["period_interval_count"] = len(combined_prices)
|
||||||
|
extended["cross_day_extended"] = True
|
||||||
|
extended["cross_day_extension_intervals"] = len(extension_intervals)
|
||||||
|
|
||||||
|
# Recalculate price statistics
|
||||||
|
extended["price_min"] = min(combined_prices)
|
||||||
|
extended["price_max"] = max(combined_prices)
|
||||||
|
extended["price_mean"] = sum(combined_prices) / len(combined_prices)
|
||||||
|
extended["price_spread"] = extended["price_max"] - extended["price_min"]
|
||||||
|
extended["price_coefficient_variation_%"] = round(combined_cv, 1)
|
||||||
|
|
||||||
|
return extended
|
||||||
|
|
||||||
|
|
||||||
|
def extend_periods_across_midnight(
|
||||||
|
period_summaries: list[dict],
|
||||||
|
all_prices: list[dict],
|
||||||
|
price_context: dict[str, Any],
|
||||||
|
*,
|
||||||
|
time: TibberPricesTimeService,
|
||||||
|
reverse_sort: bool,
|
||||||
|
) -> list[dict]:
|
||||||
|
"""
|
||||||
|
Extend late-night periods across midnight if favorable prices continue.
|
||||||
|
|
||||||
|
When a period ends close to midnight and tomorrow's data shows continued
|
||||||
|
favorable prices, extend the period into the next day. This prevents
|
||||||
|
artificial period breaks at midnight when it's actually better to continue.
|
||||||
|
|
||||||
|
Example: Best price period 22:00-23:45 today could extend to 04:00 tomorrow
|
||||||
|
if prices remain low overnight.
|
||||||
|
|
||||||
|
Rules:
|
||||||
|
- Only extends periods ending after CROSS_DAY_LATE_PERIOD_START_HOUR (20:00)
|
||||||
|
- Won't extend beyond CROSS_DAY_MAX_EXTENSION_HOUR (08:00) next day
|
||||||
|
- Extension must pass same flex criteria as original period
|
||||||
|
- Quality Gate (CV check) applies to extended period
|
||||||
|
|
||||||
|
Args:
|
||||||
|
period_summaries: List of period summary dicts (already processed)
|
||||||
|
all_prices: All price intervals including tomorrow
|
||||||
|
price_context: Dict with ref_prices, avg_prices, flex, min_distance_from_avg
|
||||||
|
time: Time service instance
|
||||||
|
reverse_sort: True for peak price, False for best price
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Updated list of period summaries with extensions applied
|
||||||
|
|
||||||
|
"""
|
||||||
|
from custom_components.tibber_prices.utils.price import calculate_coefficient_of_variation # noqa: PLC0415
|
||||||
|
|
||||||
|
from .types import ( # noqa: PLC0415
|
||||||
|
CROSS_DAY_LATE_PERIOD_START_HOUR,
|
||||||
|
CROSS_DAY_MAX_EXTENSION_HOUR,
|
||||||
|
PERIOD_MAX_CV,
|
||||||
|
TibberPricesIntervalCriteria,
|
||||||
|
)
|
||||||
|
|
||||||
|
if not period_summaries or not all_prices:
|
||||||
|
return period_summaries
|
||||||
|
|
||||||
|
# Build price lookup by timestamp
|
||||||
|
price_lookup: dict[str, dict] = {}
|
||||||
|
for price_data in all_prices:
|
||||||
|
interval_time = time.get_interval_time(price_data)
|
||||||
|
if interval_time:
|
||||||
|
price_lookup[interval_time.isoformat()] = price_data
|
||||||
|
|
||||||
|
ref_prices = price_context.get("ref_prices", {})
|
||||||
|
avg_prices = price_context.get("avg_prices", {})
|
||||||
|
flex = price_context.get("flex", 0.15)
|
||||||
|
min_distance = price_context.get("min_distance_from_avg", 0)
|
||||||
|
|
||||||
|
now = time.now()
|
||||||
|
today = now.date()
|
||||||
|
tomorrow = today + timedelta(days=1)
|
||||||
|
interval_duration = time.get_interval_duration()
|
||||||
|
|
||||||
|
# Max extension time (e.g., 08:00 tomorrow)
|
||||||
|
max_extension_time = time.start_of_local_day(now) + timedelta(days=1, hours=CROSS_DAY_MAX_EXTENSION_HOUR)
|
||||||
|
|
||||||
|
extended_summaries = []
|
||||||
|
|
||||||
|
for period in period_summaries:
|
||||||
|
# Check eligibility for extension
|
||||||
|
if not _is_period_eligible_for_extension(period, today, CROSS_DAY_LATE_PERIOD_START_HOUR):
|
||||||
|
extended_summaries.append(period)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Get tomorrow's reference prices
|
||||||
|
tomorrow_ref = ref_prices.get(tomorrow) or ref_prices.get(str(tomorrow))
|
||||||
|
tomorrow_avg = avg_prices.get(tomorrow) or avg_prices.get(str(tomorrow))
|
||||||
|
|
||||||
|
if tomorrow_ref is None or tomorrow_avg is None:
|
||||||
|
extended_summaries.append(period)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Set up criteria for extension check
|
||||||
|
criteria = TibberPricesIntervalCriteria(
|
||||||
|
ref_price=tomorrow_ref,
|
||||||
|
avg_price=tomorrow_avg,
|
||||||
|
flex=flex,
|
||||||
|
min_distance_from_avg=min_distance,
|
||||||
|
reverse_sort=reverse_sort,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Find extension intervals
|
||||||
|
extension_intervals = _find_extension_intervals(
|
||||||
|
period["end"],
|
||||||
|
price_lookup,
|
||||||
|
criteria,
|
||||||
|
max_extension_time,
|
||||||
|
interval_duration,
|
||||||
|
)
|
||||||
|
|
||||||
|
if not extension_intervals:
|
||||||
|
extended_summaries.append(period)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Collect all prices for CV check
|
||||||
|
original_prices = _collect_original_period_prices(
|
||||||
|
period["start"],
|
||||||
|
period["end"],
|
||||||
|
price_lookup,
|
||||||
|
interval_duration,
|
||||||
|
)
|
||||||
|
extension_prices = [float(p["total"]) for p in extension_intervals]
|
||||||
|
combined_prices = original_prices + extension_prices
|
||||||
|
|
||||||
|
# Quality Gate: Check CV of extended period
|
||||||
|
combined_cv = calculate_coefficient_of_variation(combined_prices)
|
||||||
|
|
||||||
|
if combined_cv is not None and combined_cv <= PERIOD_MAX_CV:
|
||||||
|
# Extension passes quality gate
|
||||||
|
extended_period = _build_extended_period(
|
||||||
|
period,
|
||||||
|
extension_intervals,
|
||||||
|
combined_prices,
|
||||||
|
combined_cv,
|
||||||
|
interval_duration,
|
||||||
|
)
|
||||||
|
|
||||||
|
_LOGGER.info(
|
||||||
|
"Cross-day extension: Period %s-%s extended to %s (+%d intervals, CV=%.1f%%)",
|
||||||
|
period["start"].strftime("%H:%M"),
|
||||||
|
period["end"].strftime("%H:%M"),
|
||||||
|
extended_period["end"].strftime("%H:%M"),
|
||||||
|
len(extension_intervals),
|
||||||
|
combined_cv,
|
||||||
|
)
|
||||||
|
extended_summaries.append(extended_period)
|
||||||
|
else:
|
||||||
|
# Extension would exceed quality gate
|
||||||
|
_LOGGER_DETAILS.debug(
|
||||||
|
"%sCross-day extension rejected for period %s-%s: CV=%.1f%% > %.1f%%",
|
||||||
|
INDENT_L0,
|
||||||
|
period["start"].strftime("%H:%M"),
|
||||||
|
period["end"].strftime("%H:%M"),
|
||||||
|
combined_cv or 0,
|
||||||
|
PERIOD_MAX_CV,
|
||||||
|
)
|
||||||
|
extended_summaries.append(period)
|
||||||
|
|
||||||
|
return extended_summaries
|
||||||
|
|
|
||||||
|
|
@ -17,6 +17,41 @@ INDENT_L1 = " " # Nested logic / loop iterations
|
||||||
INDENT_L2 = " " # Deeper nesting
|
INDENT_L2 = " " # Deeper nesting
|
||||||
|
|
||||||
|
|
||||||
|
def _estimate_merged_cv(period1: dict, period2: dict) -> float | None:
|
||||||
|
"""
|
||||||
|
Estimate the CV of a merged period from two period summaries.
|
||||||
|
|
||||||
|
Since we don't have the raw prices, we estimate using the combined min/max range.
|
||||||
|
This is a conservative estimate - the actual CV could be higher or lower.
|
||||||
|
|
||||||
|
Formula: CV ≈ (range / 2) / mean * 100
|
||||||
|
Where range = max - min, mean = (min + max) / 2
|
||||||
|
|
||||||
|
This approximation assumes roughly uniform distribution within the range.
|
||||||
|
"""
|
||||||
|
p1_min = period1.get("price_min")
|
||||||
|
p1_max = period1.get("price_max")
|
||||||
|
p2_min = period2.get("price_min")
|
||||||
|
p2_max = period2.get("price_max")
|
||||||
|
|
||||||
|
if None in (p1_min, p1_max, p2_min, p2_max):
|
||||||
|
return None
|
||||||
|
|
||||||
|
# Cast to float - None case handled above
|
||||||
|
combined_min = min(float(p1_min), float(p2_min)) # type: ignore[arg-type]
|
||||||
|
combined_max = max(float(p1_max), float(p2_max)) # type: ignore[arg-type]
|
||||||
|
|
||||||
|
if combined_min <= 0:
|
||||||
|
return None
|
||||||
|
|
||||||
|
combined_mean = (combined_min + combined_max) / 2
|
||||||
|
price_range = combined_max - combined_min
|
||||||
|
|
||||||
|
# CV estimate based on range (assuming uniform distribution)
|
||||||
|
# For uniform distribution: std_dev ≈ range / sqrt(12) ≈ range / 3.46
|
||||||
|
return (price_range / 3.46) / combined_mean * 100
|
||||||
|
|
||||||
|
|
||||||
def recalculate_period_metadata(periods: list[dict], *, time: TibberPricesTimeService) -> None:
|
def recalculate_period_metadata(periods: list[dict], *, time: TibberPricesTimeService) -> None:
|
||||||
"""
|
"""
|
||||||
Recalculate period metadata after merging periods.
|
Recalculate period metadata after merging periods.
|
||||||
|
|
@ -105,7 +140,7 @@ def merge_adjacent_periods(period1: dict, period2: dict) -> dict:
|
||||||
"period2_end": period2["end"].isoformat(),
|
"period2_end": period2["end"].isoformat(),
|
||||||
}
|
}
|
||||||
|
|
||||||
_LOGGER.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"%sMerged periods: %s-%s + %s-%s → %s-%s (duration: %d min)",
|
"%sMerged periods: %s-%s + %s-%s → %s-%s (duration: %d min)",
|
||||||
INDENT_L2,
|
INDENT_L2,
|
||||||
period1["start"].strftime("%H:%M"),
|
period1["start"].strftime("%H:%M"),
|
||||||
|
|
@ -120,6 +155,119 @@ def merge_adjacent_periods(period1: dict, period2: dict) -> dict:
|
||||||
return merged
|
return merged
|
||||||
|
|
||||||
|
|
||||||
|
def _check_merge_quality_gate(periods_to_merge: list[tuple[int, dict]], relaxed: dict) -> bool:
|
||||||
|
"""
|
||||||
|
Check if merging would create a period that's too heterogeneous.
|
||||||
|
|
||||||
|
Returns True if merge is allowed, False if blocked by Quality Gate.
|
||||||
|
"""
|
||||||
|
from .types import PERIOD_MAX_CV # noqa: PLC0415
|
||||||
|
|
||||||
|
relaxed_start = relaxed["start"]
|
||||||
|
relaxed_end = relaxed["end"]
|
||||||
|
|
||||||
|
for _idx, existing in periods_to_merge:
|
||||||
|
estimated_cv = _estimate_merged_cv(existing, relaxed)
|
||||||
|
if estimated_cv is not None and estimated_cv > PERIOD_MAX_CV:
|
||||||
|
_LOGGER.debug(
|
||||||
|
"Merge blocked by Quality Gate: %s-%s + %s-%s would have CV≈%.1f%% (max: %.1f%%)",
|
||||||
|
existing["start"].strftime("%H:%M"),
|
||||||
|
existing["end"].strftime("%H:%M"),
|
||||||
|
relaxed_start.strftime("%H:%M"),
|
||||||
|
relaxed_end.strftime("%H:%M"),
|
||||||
|
estimated_cv,
|
||||||
|
PERIOD_MAX_CV,
|
||||||
|
)
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def _would_swallow_existing(relaxed: dict, existing_periods: list[dict]) -> bool:
|
||||||
|
"""
|
||||||
|
Check if the relaxed period would "swallow" any existing period.
|
||||||
|
|
||||||
|
A period is "swallowed" if the new relaxed period completely contains it.
|
||||||
|
In this case, we should NOT merge - the existing smaller period is more
|
||||||
|
homogeneous and should be preserved.
|
||||||
|
|
||||||
|
This prevents relaxation from replacing good small periods with larger,
|
||||||
|
more heterogeneous ones.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if any existing period would be swallowed (merge should be blocked)
|
||||||
|
False if safe to proceed with merge evaluation
|
||||||
|
|
||||||
|
"""
|
||||||
|
relaxed_start = relaxed["start"]
|
||||||
|
relaxed_end = relaxed["end"]
|
||||||
|
|
||||||
|
for existing in existing_periods:
|
||||||
|
existing_start = existing["start"]
|
||||||
|
existing_end = existing["end"]
|
||||||
|
|
||||||
|
# Check if relaxed completely contains existing
|
||||||
|
if relaxed_start <= existing_start and relaxed_end >= existing_end:
|
||||||
|
_LOGGER.debug(
|
||||||
|
"Blocking merge: %s-%s would swallow %s-%s (keeping smaller period)",
|
||||||
|
relaxed_start.strftime("%H:%M"),
|
||||||
|
relaxed_end.strftime("%H:%M"),
|
||||||
|
existing_start.strftime("%H:%M"),
|
||||||
|
existing_end.strftime("%H:%M"),
|
||||||
|
)
|
||||||
|
return True
|
||||||
|
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def _is_duplicate_period(relaxed: dict, existing_periods: list[dict], tolerance_seconds: int = 60) -> bool:
|
||||||
|
"""Check if relaxed period is a duplicate of any existing period."""
|
||||||
|
relaxed_start = relaxed["start"]
|
||||||
|
relaxed_end = relaxed["end"]
|
||||||
|
|
||||||
|
for existing in existing_periods:
|
||||||
|
if (
|
||||||
|
abs((relaxed_start - existing["start"]).total_seconds()) < tolerance_seconds
|
||||||
|
and abs((relaxed_end - existing["end"]).total_seconds()) < tolerance_seconds
|
||||||
|
):
|
||||||
|
_LOGGER_DETAILS.debug(
|
||||||
|
"%sSkipping duplicate period %s-%s (already exists)",
|
||||||
|
INDENT_L1,
|
||||||
|
relaxed_start.strftime("%H:%M"),
|
||||||
|
relaxed_end.strftime("%H:%M"),
|
||||||
|
)
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def _find_adjacent_or_overlapping(relaxed: dict, existing_periods: list[dict]) -> list[tuple[int, dict]]:
|
||||||
|
"""Find all periods that are adjacent to or overlapping with the relaxed period."""
|
||||||
|
relaxed_start = relaxed["start"]
|
||||||
|
relaxed_end = relaxed["end"]
|
||||||
|
periods_to_merge = []
|
||||||
|
|
||||||
|
for idx, existing in enumerate(existing_periods):
|
||||||
|
existing_start = existing["start"]
|
||||||
|
existing_end = existing["end"]
|
||||||
|
|
||||||
|
# Check if adjacent (no gap) or overlapping
|
||||||
|
is_adjacent = relaxed_end == existing_start or relaxed_start == existing_end
|
||||||
|
is_overlapping = relaxed_start < existing_end and relaxed_end > existing_start
|
||||||
|
|
||||||
|
if is_adjacent or is_overlapping:
|
||||||
|
periods_to_merge.append((idx, existing))
|
||||||
|
_LOGGER_DETAILS.debug(
|
||||||
|
"%sPeriod %s-%s %s with existing period %s-%s",
|
||||||
|
INDENT_L1,
|
||||||
|
relaxed_start.strftime("%H:%M"),
|
||||||
|
relaxed_end.strftime("%H:%M"),
|
||||||
|
"overlaps" if is_overlapping else "is adjacent to",
|
||||||
|
existing_start.strftime("%H:%M"),
|
||||||
|
existing_end.strftime("%H:%M"),
|
||||||
|
)
|
||||||
|
|
||||||
|
return periods_to_merge
|
||||||
|
|
||||||
|
|
||||||
def resolve_period_overlaps(
|
def resolve_period_overlaps(
|
||||||
existing_periods: list[dict],
|
existing_periods: list[dict],
|
||||||
new_relaxed_periods: list[dict],
|
new_relaxed_periods: list[dict],
|
||||||
|
|
@ -130,6 +278,10 @@ def resolve_period_overlaps(
|
||||||
Adjacent or overlapping periods are merged into single continuous periods.
|
Adjacent or overlapping periods are merged into single continuous periods.
|
||||||
The newer period's relaxation attributes override the older period's.
|
The newer period's relaxation attributes override the older period's.
|
||||||
|
|
||||||
|
Quality Gate: Merging is blocked if the combined period would have
|
||||||
|
an estimated CV above PERIOD_MAX_CV (25%), to prevent creating
|
||||||
|
periods with excessive internal price variation.
|
||||||
|
|
||||||
This function is called incrementally after each relaxation phase:
|
This function is called incrementally after each relaxation phase:
|
||||||
- Phase 1: existing = baseline, new = first relaxation
|
- Phase 1: existing = baseline, new = first relaxation
|
||||||
- Phase 2: existing = baseline + phase 1, new = second relaxation
|
- Phase 2: existing = baseline + phase 1, new = second relaxation
|
||||||
|
|
@ -145,7 +297,7 @@ def resolve_period_overlaps(
|
||||||
- new_periods_count: Number of new periods added (some may have been merged)
|
- new_periods_count: Number of new periods added (some may have been merged)
|
||||||
|
|
||||||
"""
|
"""
|
||||||
_LOGGER.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"%sresolve_period_overlaps called: existing=%d, new=%d",
|
"%sresolve_period_overlaps called: existing=%d, new=%d",
|
||||||
INDENT_L0,
|
INDENT_L0,
|
||||||
len(existing_periods),
|
len(existing_periods),
|
||||||
|
|
@ -167,74 +319,60 @@ def resolve_period_overlaps(
|
||||||
relaxed_end = relaxed["end"]
|
relaxed_end = relaxed["end"]
|
||||||
|
|
||||||
# Check if this period is duplicate (exact match within tolerance)
|
# Check if this period is duplicate (exact match within tolerance)
|
||||||
tolerance_seconds = 60 # 1 minute tolerance
|
if _is_duplicate_period(relaxed, merged):
|
||||||
is_duplicate = False
|
continue
|
||||||
for existing in merged:
|
|
||||||
if (
|
|
||||||
abs((relaxed_start - existing["start"]).total_seconds()) < tolerance_seconds
|
|
||||||
and abs((relaxed_end - existing["end"]).total_seconds()) < tolerance_seconds
|
|
||||||
):
|
|
||||||
is_duplicate = True
|
|
||||||
_LOGGER.debug(
|
|
||||||
"%sSkipping duplicate period %s-%s (already exists)",
|
|
||||||
INDENT_L1,
|
|
||||||
relaxed_start.strftime("%H:%M"),
|
|
||||||
relaxed_end.strftime("%H:%M"),
|
|
||||||
)
|
|
||||||
break
|
|
||||||
|
|
||||||
if is_duplicate:
|
# Check if this period would "swallow" an existing smaller period
|
||||||
|
# In that case, skip it - the smaller existing period is more homogeneous
|
||||||
|
if _would_swallow_existing(relaxed, merged):
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Find periods that are adjacent or overlapping (should be merged)
|
# Find periods that are adjacent or overlapping (should be merged)
|
||||||
periods_to_merge = []
|
periods_to_merge = _find_adjacent_or_overlapping(relaxed, merged)
|
||||||
for idx, existing in enumerate(merged):
|
|
||||||
existing_start = existing["start"]
|
|
||||||
existing_end = existing["end"]
|
|
||||||
|
|
||||||
# Check if adjacent (no gap) or overlapping
|
|
||||||
is_adjacent = relaxed_end == existing_start or relaxed_start == existing_end
|
|
||||||
is_overlapping = relaxed_start < existing_end and relaxed_end > existing_start
|
|
||||||
|
|
||||||
if is_adjacent or is_overlapping:
|
|
||||||
periods_to_merge.append((idx, existing))
|
|
||||||
_LOGGER.debug(
|
|
||||||
"%sPeriod %s-%s %s with existing period %s-%s",
|
|
||||||
INDENT_L1,
|
|
||||||
relaxed_start.strftime("%H:%M"),
|
|
||||||
relaxed_end.strftime("%H:%M"),
|
|
||||||
"overlaps" if is_overlapping else "is adjacent to",
|
|
||||||
existing_start.strftime("%H:%M"),
|
|
||||||
existing_end.strftime("%H:%M"),
|
|
||||||
)
|
|
||||||
|
|
||||||
if not periods_to_merge:
|
if not periods_to_merge:
|
||||||
# No merge needed - add as new period
|
# No merge needed - add as new period
|
||||||
merged.append(relaxed)
|
merged.append(relaxed)
|
||||||
periods_added += 1
|
periods_added += 1
|
||||||
_LOGGER.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"%sAdded new period %s-%s (no overlap/adjacency)",
|
"%sAdded new period %s-%s (no overlap/adjacency)",
|
||||||
INDENT_L1,
|
INDENT_L1,
|
||||||
relaxed_start.strftime("%H:%M"),
|
relaxed_start.strftime("%H:%M"),
|
||||||
relaxed_end.strftime("%H:%M"),
|
relaxed_end.strftime("%H:%M"),
|
||||||
)
|
)
|
||||||
else:
|
continue
|
||||||
# Merge with all adjacent/overlapping periods
|
|
||||||
# Start with the new relaxed period
|
|
||||||
merged_period = relaxed.copy()
|
|
||||||
|
|
||||||
# Remove old periods (in reverse order to maintain indices)
|
# Quality Gate: Check if merging would create a period that's too heterogeneous
|
||||||
for idx, existing in reversed(periods_to_merge):
|
should_merge = _check_merge_quality_gate(periods_to_merge, relaxed)
|
||||||
merged_period = merge_adjacent_periods(existing, merged_period)
|
|
||||||
merged.pop(idx)
|
|
||||||
|
|
||||||
# Add the merged result
|
if not should_merge:
|
||||||
merged.append(merged_period)
|
# Don't merge - add as separate period instead
|
||||||
|
merged.append(relaxed)
|
||||||
|
periods_added += 1
|
||||||
|
_LOGGER_DETAILS.debug(
|
||||||
|
"%sAdded new period %s-%s separately (merge blocked by CV gate)",
|
||||||
|
INDENT_L1,
|
||||||
|
relaxed_start.strftime("%H:%M"),
|
||||||
|
relaxed_end.strftime("%H:%M"),
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
# Count as added if we merged exactly one existing period
|
# Merge with all adjacent/overlapping periods
|
||||||
# (means we extended/merged, not replaced multiple)
|
# Start with the new relaxed period
|
||||||
if len(periods_to_merge) == 1:
|
merged_period = relaxed.copy()
|
||||||
periods_added += 1
|
|
||||||
|
# Remove old periods (in reverse order to maintain indices)
|
||||||
|
for idx, existing in reversed(periods_to_merge):
|
||||||
|
merged_period = merge_adjacent_periods(existing, merged_period)
|
||||||
|
merged.pop(idx)
|
||||||
|
|
||||||
|
# Add the merged result
|
||||||
|
merged.append(merged_period)
|
||||||
|
|
||||||
|
# Count as added if we merged exactly one existing period
|
||||||
|
# (means we extended/merged, not replaced multiple)
|
||||||
|
if len(periods_to_merge) == 1:
|
||||||
|
periods_added += 1
|
||||||
|
|
||||||
# Sort all periods by start time
|
# Sort all periods by start time
|
||||||
merged.sort(key=lambda p: p["start"])
|
merged.sort(key=lambda p: p["start"])
|
||||||
|
|
|
||||||
|
|
@ -19,6 +19,7 @@ from custom_components.tibber_prices.utils.average import calculate_median
|
||||||
from custom_components.tibber_prices.utils.price import (
|
from custom_components.tibber_prices.utils.price import (
|
||||||
aggregate_period_levels,
|
aggregate_period_levels,
|
||||||
aggregate_period_ratings,
|
aggregate_period_ratings,
|
||||||
|
calculate_coefficient_of_variation,
|
||||||
calculate_volatility_level,
|
calculate_volatility_level,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -169,6 +170,7 @@ def build_period_summary_dict(
|
||||||
"price_min": stats.price_min,
|
"price_min": stats.price_min,
|
||||||
"price_max": stats.price_max,
|
"price_max": stats.price_max,
|
||||||
"price_spread": stats.price_spread,
|
"price_spread": stats.price_spread,
|
||||||
|
"price_coefficient_variation_%": stats.coefficient_of_variation,
|
||||||
"volatility": stats.volatility,
|
"volatility": stats.volatility,
|
||||||
# 4. Price differences will be added below if available
|
# 4. Price differences will be added below if available
|
||||||
# 5. Detail information (additional context)
|
# 5. Detail information (additional context)
|
||||||
|
|
@ -314,7 +316,10 @@ def extract_period_summaries(
|
||||||
# Extract prices for volatility calculation (coefficient of variation)
|
# Extract prices for volatility calculation (coefficient of variation)
|
||||||
prices_for_volatility = [float(p["total"]) for p in period_price_data if "total" in p]
|
prices_for_volatility = [float(p["total"]) for p in period_price_data if "total" in p]
|
||||||
|
|
||||||
# Calculate volatility (categorical) and aggregated rating difference (numeric)
|
# Calculate CV (numeric) for quality gate checks
|
||||||
|
period_cv = calculate_coefficient_of_variation(prices_for_volatility)
|
||||||
|
|
||||||
|
# Calculate volatility (categorical) using thresholds
|
||||||
volatility = calculate_volatility_level(
|
volatility = calculate_volatility_level(
|
||||||
prices_for_volatility,
|
prices_for_volatility,
|
||||||
threshold_moderate=thresholds.threshold_volatility_moderate,
|
threshold_moderate=thresholds.threshold_volatility_moderate,
|
||||||
|
|
@ -348,6 +353,7 @@ def extract_period_summaries(
|
||||||
price_max=price_stats["price_max"],
|
price_max=price_stats["price_max"],
|
||||||
price_spread=price_stats["price_spread"],
|
price_spread=price_stats["price_spread"],
|
||||||
volatility=volatility,
|
volatility=volatility,
|
||||||
|
coefficient_of_variation=round(period_cv, 1) if period_cv is not None else None,
|
||||||
period_price_diff=period_price_diff,
|
period_price_diff=period_price_diff,
|
||||||
period_price_diff_pct=period_price_diff_pct,
|
period_price_diff_pct=period_price_diff_pct,
|
||||||
)
|
)
|
||||||
|
|
|
||||||
|
|
@ -11,7 +11,7 @@ if TYPE_CHECKING:
|
||||||
|
|
||||||
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
||||||
|
|
||||||
from .types import TibberPricesPeriodConfig
|
from custom_components.tibber_prices.utils.price import calculate_coefficient_of_variation
|
||||||
|
|
||||||
from .period_overlap import (
|
from .period_overlap import (
|
||||||
recalculate_period_metadata,
|
recalculate_period_metadata,
|
||||||
|
|
@ -21,6 +21,8 @@ from .types import (
|
||||||
INDENT_L0,
|
INDENT_L0,
|
||||||
INDENT_L1,
|
INDENT_L1,
|
||||||
INDENT_L2,
|
INDENT_L2,
|
||||||
|
PERIOD_MAX_CV,
|
||||||
|
TibberPricesPeriodConfig,
|
||||||
)
|
)
|
||||||
|
|
||||||
_LOGGER = logging.getLogger(__name__)
|
_LOGGER = logging.getLogger(__name__)
|
||||||
|
|
@ -32,6 +34,125 @@ FLEX_WARNING_THRESHOLD_RELAXATION = 0.25 # 25% - INFO: suggest lowering to 15-2
|
||||||
MAX_FLEX_HARD_LIMIT = 0.50 # 50% - hard maximum flex value
|
MAX_FLEX_HARD_LIMIT = 0.50 # 50% - hard maximum flex value
|
||||||
FLEX_HIGH_THRESHOLD_RELAXATION = 0.30 # 30% - WARNING: base flex too high for relaxation mode
|
FLEX_HIGH_THRESHOLD_RELAXATION = 0.30 # 30% - WARNING: base flex too high for relaxation mode
|
||||||
|
|
||||||
|
# Min duration fallback constants
|
||||||
|
# When all relaxation phases are exhausted and still no periods found,
|
||||||
|
# gradually reduce min_period_length to find at least something
|
||||||
|
MIN_DURATION_FALLBACK_MINIMUM = 30 # Minimum period length to try (30 min = 2 intervals)
|
||||||
|
MIN_DURATION_FALLBACK_STEP = 15 # Reduce by 15 min (1 interval) each step
|
||||||
|
|
||||||
|
|
||||||
|
def _check_period_quality(
|
||||||
|
period: dict, all_prices: list[dict], *, time: TibberPricesTimeService
|
||||||
|
) -> tuple[bool, float | None]:
|
||||||
|
"""
|
||||||
|
Check if a period passes the quality gate (internal CV not too high).
|
||||||
|
|
||||||
|
The Quality Gate prevents relaxation from creating periods with too much
|
||||||
|
internal price variation. A "best price period" with prices ranging from
|
||||||
|
0.5 to 1.0 kr/kWh is not useful - user can't trust it's actually "best".
|
||||||
|
|
||||||
|
Args:
|
||||||
|
period: Period summary dict with "start" and "end" datetime
|
||||||
|
all_prices: All price intervals (to look up prices for CV calculation)
|
||||||
|
time: Time service for interval time parsing
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Tuple of (passes_quality_gate, cv_value)
|
||||||
|
- passes_quality_gate: True if CV <= PERIOD_MAX_CV
|
||||||
|
- cv_value: Calculated CV as percentage, or None if not calculable
|
||||||
|
|
||||||
|
"""
|
||||||
|
start_time = period.get("start")
|
||||||
|
end_time = period.get("end")
|
||||||
|
|
||||||
|
if not start_time or not end_time:
|
||||||
|
return True, None # Can't check, assume OK
|
||||||
|
|
||||||
|
# Build lookup for prices
|
||||||
|
price_lookup: dict[str, float] = {}
|
||||||
|
for price_data in all_prices:
|
||||||
|
interval_time = time.get_interval_time(price_data)
|
||||||
|
if interval_time:
|
||||||
|
price_lookup[interval_time.isoformat()] = float(price_data["total"])
|
||||||
|
|
||||||
|
# Collect prices within the period
|
||||||
|
period_prices: list[float] = []
|
||||||
|
interval_duration = time.get_interval_duration()
|
||||||
|
|
||||||
|
current = start_time
|
||||||
|
while current < end_time:
|
||||||
|
price = price_lookup.get(current.isoformat())
|
||||||
|
if price is not None:
|
||||||
|
period_prices.append(price)
|
||||||
|
current = current + interval_duration
|
||||||
|
|
||||||
|
# Need at least 2 prices to calculate CV (same as MIN_PRICES_FOR_VOLATILITY in price.py)
|
||||||
|
min_prices_for_cv = 2
|
||||||
|
if len(period_prices) < min_prices_for_cv:
|
||||||
|
return True, None # Too few prices to calculate CV
|
||||||
|
|
||||||
|
cv = calculate_coefficient_of_variation(period_prices)
|
||||||
|
if cv is None:
|
||||||
|
return True, None
|
||||||
|
|
||||||
|
passes = cv <= PERIOD_MAX_CV
|
||||||
|
return passes, cv
|
||||||
|
|
||||||
|
|
||||||
|
def _count_quality_periods(
|
||||||
|
periods: list[dict],
|
||||||
|
all_prices: list[dict],
|
||||||
|
prices_by_day: dict[date, list[dict]],
|
||||||
|
min_periods: int,
|
||||||
|
*,
|
||||||
|
time: TibberPricesTimeService,
|
||||||
|
) -> tuple[int, int]:
|
||||||
|
"""
|
||||||
|
Count days meeting requirement when considering quality gate.
|
||||||
|
|
||||||
|
Only periods passing the quality gate (CV <= PERIOD_MAX_CV) are counted
|
||||||
|
towards meeting the min_periods requirement.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
periods: List of all periods
|
||||||
|
all_prices: All price intervals
|
||||||
|
prices_by_day: Price intervals grouped by day
|
||||||
|
min_periods: Target periods per day
|
||||||
|
time: Time service
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Tuple of (days_meeting_requirement, total_quality_periods)
|
||||||
|
|
||||||
|
"""
|
||||||
|
periods_by_day = group_periods_by_day(periods)
|
||||||
|
days_meeting_requirement = 0
|
||||||
|
total_quality_periods = 0
|
||||||
|
|
||||||
|
for day in sorted(prices_by_day.keys()):
|
||||||
|
day_periods = periods_by_day.get(day, [])
|
||||||
|
quality_count = 0
|
||||||
|
|
||||||
|
for period in day_periods:
|
||||||
|
passes, cv = _check_period_quality(period, all_prices, time=time)
|
||||||
|
if passes:
|
||||||
|
quality_count += 1
|
||||||
|
else:
|
||||||
|
_LOGGER_DETAILS.debug(
|
||||||
|
"%s Day %s: Period %s-%s REJECTED by quality gate (CV=%.1f%% > %.1f%%)",
|
||||||
|
INDENT_L2,
|
||||||
|
day,
|
||||||
|
period.get("start", "?").strftime("%H:%M") if hasattr(period.get("start"), "strftime") else "?",
|
||||||
|
period.get("end", "?").strftime("%H:%M") if hasattr(period.get("end"), "strftime") else "?",
|
||||||
|
cv or 0,
|
||||||
|
PERIOD_MAX_CV,
|
||||||
|
)
|
||||||
|
|
||||||
|
total_quality_periods += quality_count
|
||||||
|
if quality_count >= min_periods:
|
||||||
|
days_meeting_requirement += 1
|
||||||
|
|
||||||
|
return days_meeting_requirement, total_quality_periods
|
||||||
|
|
||||||
|
|
||||||
def group_periods_by_day(periods: list[dict]) -> dict[date, list[dict]]:
|
def group_periods_by_day(periods: list[dict]) -> dict[date, list[dict]]:
|
||||||
"""
|
"""
|
||||||
|
|
@ -137,7 +258,167 @@ def group_prices_by_day(all_prices: list[dict], *, time: TibberPricesTimeService
|
||||||
return prices_by_day
|
return prices_by_day
|
||||||
|
|
||||||
|
|
||||||
def calculate_periods_with_relaxation( # noqa: PLR0913, PLR0915 - Per-day relaxation requires many parameters and statements
|
def _try_min_duration_fallback(
|
||||||
|
*,
|
||||||
|
config: TibberPricesPeriodConfig,
|
||||||
|
existing_periods: list[dict],
|
||||||
|
prices_by_day: dict[date, list[dict]],
|
||||||
|
time: TibberPricesTimeService,
|
||||||
|
) -> tuple[dict[str, Any] | None, dict[str, Any]]:
|
||||||
|
"""
|
||||||
|
Try reducing min_period_length to find periods when relaxation is exhausted.
|
||||||
|
|
||||||
|
This is a LAST RESORT mechanism. It only activates when:
|
||||||
|
1. All relaxation phases have been tried
|
||||||
|
2. Some days STILL have zero periods (not just below min_periods)
|
||||||
|
|
||||||
|
The fallback progressively reduces min_period_length:
|
||||||
|
- 60 min (default) → 45 min → 30 min (minimum)
|
||||||
|
|
||||||
|
It does NOT reduce below 30 min (2 intervals) because a single 15-min
|
||||||
|
interval is essentially just the daily min/max price - not a "period".
|
||||||
|
|
||||||
|
Args:
|
||||||
|
config: Period configuration
|
||||||
|
existing_periods: Periods found so far (from relaxation)
|
||||||
|
prices_by_day: Price intervals grouped by day
|
||||||
|
time: Time service instance
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Tuple of (result dict with periods, metadata dict) or (None, empty metadata)
|
||||||
|
|
||||||
|
"""
|
||||||
|
from .core import calculate_periods # noqa: PLC0415 - Avoid circular import
|
||||||
|
|
||||||
|
metadata: dict[str, Any] = {"phases_used": [], "fallback_active": False}
|
||||||
|
|
||||||
|
# Only try fallback if current min_period_length > minimum
|
||||||
|
if config.min_period_length <= MIN_DURATION_FALLBACK_MINIMUM:
|
||||||
|
return None, metadata
|
||||||
|
|
||||||
|
# Check which days have ZERO periods (not just below target)
|
||||||
|
existing_by_day = group_periods_by_day(existing_periods)
|
||||||
|
days_with_zero_periods = [day for day in prices_by_day if not existing_by_day.get(day)]
|
||||||
|
|
||||||
|
if not days_with_zero_periods:
|
||||||
|
_LOGGER_DETAILS.debug(
|
||||||
|
"%sMin duration fallback: All days have at least one period - no fallback needed",
|
||||||
|
INDENT_L1,
|
||||||
|
)
|
||||||
|
return None, metadata
|
||||||
|
|
||||||
|
_LOGGER.info(
|
||||||
|
"Min duration fallback: %d day(s) have zero periods, trying shorter min_period_length...",
|
||||||
|
len(days_with_zero_periods),
|
||||||
|
)
|
||||||
|
|
||||||
|
# Try progressively shorter min_period_length
|
||||||
|
current_min_duration = config.min_period_length
|
||||||
|
fallback_periods: list[dict] = []
|
||||||
|
|
||||||
|
while current_min_duration > MIN_DURATION_FALLBACK_MINIMUM:
|
||||||
|
current_min_duration = max(
|
||||||
|
current_min_duration - MIN_DURATION_FALLBACK_STEP,
|
||||||
|
MIN_DURATION_FALLBACK_MINIMUM,
|
||||||
|
)
|
||||||
|
|
||||||
|
_LOGGER_DETAILS.debug(
|
||||||
|
"%sTrying min_period_length=%d min for days with zero periods",
|
||||||
|
INDENT_L2,
|
||||||
|
current_min_duration,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Create modified config with shorter min_period_length
|
||||||
|
# Use maxed-out flex (50%) since we're in fallback mode
|
||||||
|
fallback_config = TibberPricesPeriodConfig(
|
||||||
|
reverse_sort=config.reverse_sort,
|
||||||
|
flex=MAX_FLEX_HARD_LIMIT, # Max flex
|
||||||
|
min_distance_from_avg=0, # Disable min_distance in fallback
|
||||||
|
min_period_length=current_min_duration,
|
||||||
|
threshold_low=config.threshold_low,
|
||||||
|
threshold_high=config.threshold_high,
|
||||||
|
threshold_volatility_moderate=config.threshold_volatility_moderate,
|
||||||
|
threshold_volatility_high=config.threshold_volatility_high,
|
||||||
|
threshold_volatility_very_high=config.threshold_volatility_very_high,
|
||||||
|
level_filter=None, # Disable level filter
|
||||||
|
gap_count=config.gap_count,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Try to find periods for days with zero periods
|
||||||
|
for day in days_with_zero_periods:
|
||||||
|
day_prices = prices_by_day.get(day, [])
|
||||||
|
if not day_prices:
|
||||||
|
continue
|
||||||
|
|
||||||
|
try:
|
||||||
|
day_result = calculate_periods(
|
||||||
|
day_prices,
|
||||||
|
config=fallback_config,
|
||||||
|
time=time,
|
||||||
|
)
|
||||||
|
|
||||||
|
day_periods = day_result.get("periods", [])
|
||||||
|
if day_periods:
|
||||||
|
# Mark periods with fallback metadata
|
||||||
|
for period in day_periods:
|
||||||
|
period["duration_fallback_active"] = True
|
||||||
|
period["duration_fallback_min_length"] = current_min_duration
|
||||||
|
period["relaxation_active"] = True
|
||||||
|
period["relaxation_level"] = f"duration_fallback={current_min_duration}min"
|
||||||
|
|
||||||
|
fallback_periods.extend(day_periods)
|
||||||
|
_LOGGER.info(
|
||||||
|
"Min duration fallback: Found %d period(s) for %s at min_length=%d min",
|
||||||
|
len(day_periods),
|
||||||
|
day,
|
||||||
|
current_min_duration,
|
||||||
|
)
|
||||||
|
|
||||||
|
except (KeyError, ValueError, TypeError) as err:
|
||||||
|
_LOGGER.warning(
|
||||||
|
"Error during min duration fallback for %s: %s",
|
||||||
|
day,
|
||||||
|
err,
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# If we found periods for all zero-period days, we can stop
|
||||||
|
if fallback_periods:
|
||||||
|
# Remove days that now have periods from the list
|
||||||
|
fallback_by_day = group_periods_by_day(fallback_periods)
|
||||||
|
days_with_zero_periods = [day for day in days_with_zero_periods if not fallback_by_day.get(day)]
|
||||||
|
|
||||||
|
if not days_with_zero_periods:
|
||||||
|
break
|
||||||
|
|
||||||
|
if fallback_periods:
|
||||||
|
# Merge with existing periods
|
||||||
|
# resolve_period_overlaps merges adjacent/overlapping periods
|
||||||
|
merged_periods, _new_count = resolve_period_overlaps(
|
||||||
|
existing_periods,
|
||||||
|
fallback_periods,
|
||||||
|
)
|
||||||
|
recalculate_period_metadata(merged_periods, time=time)
|
||||||
|
|
||||||
|
metadata["fallback_active"] = True
|
||||||
|
metadata["phases_used"] = [f"duration_fallback (min_length={current_min_duration}min)"]
|
||||||
|
|
||||||
|
_LOGGER.info(
|
||||||
|
"Min duration fallback complete: Added %d period(s), total now %d",
|
||||||
|
len(fallback_periods),
|
||||||
|
len(merged_periods),
|
||||||
|
)
|
||||||
|
|
||||||
|
return {"periods": merged_periods}, metadata
|
||||||
|
|
||||||
|
_LOGGER.warning(
|
||||||
|
"Min duration fallback: Still %d day(s) with zero periods after trying all durations",
|
||||||
|
len(days_with_zero_periods),
|
||||||
|
)
|
||||||
|
return None, metadata
|
||||||
|
|
||||||
|
|
||||||
|
def calculate_periods_with_relaxation( # noqa: PLR0912, PLR0913, PLR0915 - Per-day relaxation requires many parameters and branches
|
||||||
all_prices: list[dict],
|
all_prices: list[dict],
|
||||||
*,
|
*,
|
||||||
config: TibberPricesPeriodConfig,
|
config: TibberPricesPeriodConfig,
|
||||||
|
|
@ -185,6 +466,9 @@ def calculate_periods_with_relaxation( # noqa: PLR0913, PLR0915 - Per-day relax
|
||||||
from .core import ( # noqa: PLC0415
|
from .core import ( # noqa: PLC0415
|
||||||
calculate_periods,
|
calculate_periods,
|
||||||
)
|
)
|
||||||
|
from .period_building import ( # noqa: PLC0415
|
||||||
|
filter_superseded_periods,
|
||||||
|
)
|
||||||
|
|
||||||
# Compact INFO-level summary
|
# Compact INFO-level summary
|
||||||
period_type = "PEAK PRICE" if config.reverse_sort else "BEST PRICE"
|
period_type = "PEAK PRICE" if config.reverse_sort else "BEST PRICE"
|
||||||
|
|
@ -338,6 +622,37 @@ def calculate_periods_with_relaxation( # noqa: PLR0913, PLR0915 - Per-day relax
|
||||||
period_count = len(day_periods)
|
period_count = len(day_periods)
|
||||||
if period_count >= min_periods:
|
if period_count >= min_periods:
|
||||||
days_meeting_requirement += 1
|
days_meeting_requirement += 1
|
||||||
|
|
||||||
|
# === MIN DURATION FALLBACK ===
|
||||||
|
# If still no periods after relaxation, try reducing min_period_length
|
||||||
|
# This is a last resort to ensure users always get SOME period
|
||||||
|
if days_meeting_requirement < total_days and config.min_period_length > MIN_DURATION_FALLBACK_MINIMUM:
|
||||||
|
_LOGGER.info(
|
||||||
|
"Relaxation incomplete (%d/%d days). Trying min_duration fallback...",
|
||||||
|
days_meeting_requirement,
|
||||||
|
total_days,
|
||||||
|
)
|
||||||
|
|
||||||
|
fallback_result, fallback_metadata = _try_min_duration_fallback(
|
||||||
|
config=config,
|
||||||
|
existing_periods=all_periods,
|
||||||
|
prices_by_day=prices_by_day,
|
||||||
|
time=time,
|
||||||
|
)
|
||||||
|
|
||||||
|
if fallback_result:
|
||||||
|
all_periods = fallback_result["periods"]
|
||||||
|
all_phases_used.extend(fallback_metadata.get("phases_used", []))
|
||||||
|
|
||||||
|
# Recount after fallback
|
||||||
|
periods_by_day = group_periods_by_day(all_periods)
|
||||||
|
days_meeting_requirement = 0
|
||||||
|
for day in sorted(prices_by_day.keys()):
|
||||||
|
day_periods = periods_by_day.get(day, [])
|
||||||
|
period_count = len(day_periods)
|
||||||
|
if period_count >= min_periods:
|
||||||
|
days_meeting_requirement += 1
|
||||||
|
|
||||||
elif enable_relaxation:
|
elif enable_relaxation:
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"%sAll %d days met target with baseline - no relaxation needed",
|
"%sAll %d days met target with baseline - no relaxation needed",
|
||||||
|
|
@ -351,6 +666,14 @@ def calculate_periods_with_relaxation( # noqa: PLR0913, PLR0915 - Per-day relax
|
||||||
# Recalculate metadata for combined periods
|
# Recalculate metadata for combined periods
|
||||||
recalculate_period_metadata(all_periods, time=time)
|
recalculate_period_metadata(all_periods, time=time)
|
||||||
|
|
||||||
|
# Apply cross-day supersession filter (only for best-price periods)
|
||||||
|
# This removes late-night today periods that are superseded by better tomorrow alternatives
|
||||||
|
all_periods = filter_superseded_periods(
|
||||||
|
all_periods,
|
||||||
|
time=time,
|
||||||
|
reverse_sort=config.reverse_sort,
|
||||||
|
)
|
||||||
|
|
||||||
# Build final result
|
# Build final result
|
||||||
final_result = baseline_result.copy()
|
final_result = baseline_result.copy()
|
||||||
final_result["periods"] = all_periods
|
final_result["periods"] = all_periods
|
||||||
|
|
@ -491,23 +814,11 @@ def relax_all_prices( # noqa: PLR0913 - Comprehensive filter relaxation require
|
||||||
new_relaxed_periods=new_periods,
|
new_relaxed_periods=new_periods,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Count periods per day to check if requirement met
|
# Count periods per day with QUALITY GATE check
|
||||||
periods_by_day = group_periods_by_day(combined)
|
# Only periods with CV <= PERIOD_MAX_CV count towards min_periods requirement
|
||||||
days_meeting_requirement = 0
|
days_meeting_requirement, quality_period_count = _count_quality_periods(
|
||||||
|
combined, all_prices, prices_by_day, min_periods, time=time
|
||||||
for day in sorted(prices_by_day.keys()):
|
)
|
||||||
day_periods = periods_by_day.get(day, [])
|
|
||||||
period_count = len(day_periods)
|
|
||||||
if period_count >= min_periods:
|
|
||||||
days_meeting_requirement += 1
|
|
||||||
|
|
||||||
_LOGGER_DETAILS.debug(
|
|
||||||
"%s Day %s: %d periods%s",
|
|
||||||
INDENT_L2,
|
|
||||||
day,
|
|
||||||
period_count,
|
|
||||||
" ✓" if period_count >= min_periods else f" (need {min_periods})",
|
|
||||||
)
|
|
||||||
|
|
||||||
total_periods = len(combined)
|
total_periods = len(combined)
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
|
|
|
||||||
|
|
@ -15,6 +15,24 @@ from custom_components.tibber_prices.const import (
|
||||||
DEFAULT_VOLATILITY_THRESHOLD_VERY_HIGH,
|
DEFAULT_VOLATILITY_THRESHOLD_VERY_HIGH,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Quality Gate: Maximum coefficient of variation (CV) allowed within a period
|
||||||
|
# Periods with internal CV above this are considered too heterogeneous for "best price"
|
||||||
|
# A 25% CV means the std dev is 25% of the mean - beyond this, prices vary too much
|
||||||
|
# Example: Period with prices 0.7-0.99 kr has ~15% CV which is acceptable
|
||||||
|
# Period with prices 0.5-1.0 kr has ~30% CV which would be rejected
|
||||||
|
PERIOD_MAX_CV = 25.0 # 25% max coefficient of variation within a period
|
||||||
|
|
||||||
|
# Cross-Day Extension: Time window constants
|
||||||
|
# When a period ends late in the day and tomorrow data is available,
|
||||||
|
# we can extend it past midnight if prices remain favorable
|
||||||
|
CROSS_DAY_LATE_PERIOD_START_HOUR = 20 # Consider periods starting at 20:00 or later for extension
|
||||||
|
CROSS_DAY_MAX_EXTENSION_HOUR = 8 # Don't extend beyond 08:00 next day (covers typical night low)
|
||||||
|
|
||||||
|
# Cross-Day Supersession: When tomorrow data arrives, late-night periods that are
|
||||||
|
# worse than early-morning tomorrow periods become obsolete
|
||||||
|
# A today period is "superseded" if tomorrow has a significantly better alternative
|
||||||
|
SUPERSESSION_PRICE_IMPROVEMENT_PCT = 10.0 # Tomorrow must be at least 10% cheaper to supersede
|
||||||
|
|
||||||
# Log indentation levels for visual hierarchy
|
# Log indentation levels for visual hierarchy
|
||||||
INDENT_L0 = "" # Top level (calculate_periods_with_relaxation)
|
INDENT_L0 = "" # Top level (calculate_periods_with_relaxation)
|
||||||
INDENT_L1 = " " # Per-day loop
|
INDENT_L1 = " " # Per-day loop
|
||||||
|
|
@ -62,6 +80,7 @@ class TibberPricesPeriodStatistics(NamedTuple):
|
||||||
price_max: float
|
price_max: float
|
||||||
price_spread: float
|
price_spread: float
|
||||||
volatility: str
|
volatility: str
|
||||||
|
coefficient_of_variation: float | None # CV as percentage (e.g., 15.0 for 15%)
|
||||||
period_price_diff: float | None
|
period_price_diff: float | None
|
||||||
period_price_diff_pct: float | None
|
period_price_diff_pct: float | None
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -13,6 +13,8 @@ from typing import TYPE_CHECKING, Any
|
||||||
from custom_components.tibber_prices import const as _const
|
from custom_components.tibber_prices import const as _const
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
|
from collections.abc import Callable
|
||||||
|
|
||||||
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
||||||
from homeassistant.config_entries import ConfigEntry
|
from homeassistant.config_entries import ConfigEntry
|
||||||
|
|
||||||
|
|
@ -32,6 +34,7 @@ class TibberPricesPeriodCalculator:
|
||||||
self,
|
self,
|
||||||
config_entry: ConfigEntry,
|
config_entry: ConfigEntry,
|
||||||
log_prefix: str,
|
log_prefix: str,
|
||||||
|
get_config_override_fn: Callable[[str, str], Any | None] | None = None,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Initialize the period calculator."""
|
"""Initialize the period calculator."""
|
||||||
self.config_entry = config_entry
|
self.config_entry = config_entry
|
||||||
|
|
@ -39,11 +42,40 @@ class TibberPricesPeriodCalculator:
|
||||||
self.time: TibberPricesTimeService # Set by coordinator before first use
|
self.time: TibberPricesTimeService # Set by coordinator before first use
|
||||||
self._config_cache: dict[str, dict[str, Any]] | None = None
|
self._config_cache: dict[str, dict[str, Any]] | None = None
|
||||||
self._config_cache_valid = False
|
self._config_cache_valid = False
|
||||||
|
self._get_config_override = get_config_override_fn
|
||||||
|
|
||||||
# Period calculation cache
|
# Period calculation cache
|
||||||
self._cached_periods: dict[str, Any] | None = None
|
self._cached_periods: dict[str, Any] | None = None
|
||||||
self._last_periods_hash: str | None = None
|
self._last_periods_hash: str | None = None
|
||||||
|
|
||||||
|
def _get_option(
|
||||||
|
self,
|
||||||
|
config_key: str,
|
||||||
|
config_section: str,
|
||||||
|
default: Any,
|
||||||
|
) -> Any:
|
||||||
|
"""
|
||||||
|
Get a config option, checking overrides first.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
config_key: The configuration key
|
||||||
|
config_section: The section in options (e.g., "flexibility_settings")
|
||||||
|
default: Default value if not set
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Override value if set, otherwise options value, otherwise default
|
||||||
|
|
||||||
|
"""
|
||||||
|
# Check overrides first
|
||||||
|
if self._get_config_override is not None:
|
||||||
|
override = self._get_config_override(config_key, config_section)
|
||||||
|
if override is not None:
|
||||||
|
return override
|
||||||
|
|
||||||
|
# Fall back to options
|
||||||
|
section = self.config_entry.options.get(config_section, {})
|
||||||
|
return section.get(config_key, default)
|
||||||
|
|
||||||
def _log(self, level: str, message: str, *args: object, **kwargs: object) -> None:
|
def _log(self, level: str, message: str, *args: object, **kwargs: object) -> None:
|
||||||
"""Log with calculator-specific prefix."""
|
"""Log with calculator-specific prefix."""
|
||||||
prefixed_message = f"{self._log_prefix} {message}"
|
prefixed_message = f"{self._log_prefix} {message}"
|
||||||
|
|
@ -112,7 +144,7 @@ class TibberPricesPeriodCalculator:
|
||||||
Get period calculation configuration from config options.
|
Get period calculation configuration from config options.
|
||||||
|
|
||||||
Uses cached config to avoid multiple options.get() calls.
|
Uses cached config to avoid multiple options.get() calls.
|
||||||
Cache is invalidated when config_entry.options change.
|
Cache is invalidated when config_entry.options change or override entities update.
|
||||||
"""
|
"""
|
||||||
cache_key = "peak" if reverse_sort else "best"
|
cache_key = "peak" if reverse_sort else "best"
|
||||||
|
|
||||||
|
|
@ -124,36 +156,44 @@ class TibberPricesPeriodCalculator:
|
||||||
if self._config_cache is None:
|
if self._config_cache is None:
|
||||||
self._config_cache = {}
|
self._config_cache = {}
|
||||||
|
|
||||||
options = self.config_entry.options
|
# Get config values, checking overrides first
|
||||||
|
|
||||||
# Get nested sections from options
|
|
||||||
# CRITICAL: Best/Peak price settings are stored in nested sections:
|
# CRITICAL: Best/Peak price settings are stored in nested sections:
|
||||||
# - period_settings: min_period_length, max_level, gap_count
|
# - period_settings: min_period_length, max_level, gap_count
|
||||||
# - flexibility_settings: flex, min_distance_from_avg
|
# - flexibility_settings: flex, min_distance_from_avg
|
||||||
# These settings are ONLY in options (not in data), structured since initial config flow
|
# Override entities can override any of these values at runtime
|
||||||
period_settings = options.get("period_settings", {})
|
|
||||||
flexibility_settings = options.get("flexibility_settings", {})
|
|
||||||
|
|
||||||
if reverse_sort:
|
if reverse_sort:
|
||||||
# Peak price configuration
|
# Peak price configuration
|
||||||
flex = flexibility_settings.get(_const.CONF_PEAK_PRICE_FLEX, _const.DEFAULT_PEAK_PRICE_FLEX)
|
flex = self._get_option(
|
||||||
min_distance_from_avg = flexibility_settings.get(
|
_const.CONF_PEAK_PRICE_FLEX,
|
||||||
|
"flexibility_settings",
|
||||||
|
_const.DEFAULT_PEAK_PRICE_FLEX,
|
||||||
|
)
|
||||||
|
min_distance_from_avg = self._get_option(
|
||||||
_const.CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
_const.CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||||
|
"flexibility_settings",
|
||||||
_const.DEFAULT_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
_const.DEFAULT_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||||
)
|
)
|
||||||
min_period_length = period_settings.get(
|
min_period_length = self._get_option(
|
||||||
_const.CONF_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
_const.CONF_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
||||||
|
"period_settings",
|
||||||
_const.DEFAULT_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
_const.DEFAULT_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
# Best price configuration
|
# Best price configuration
|
||||||
flex = flexibility_settings.get(_const.CONF_BEST_PRICE_FLEX, _const.DEFAULT_BEST_PRICE_FLEX)
|
flex = self._get_option(
|
||||||
min_distance_from_avg = flexibility_settings.get(
|
_const.CONF_BEST_PRICE_FLEX,
|
||||||
|
"flexibility_settings",
|
||||||
|
_const.DEFAULT_BEST_PRICE_FLEX,
|
||||||
|
)
|
||||||
|
min_distance_from_avg = self._get_option(
|
||||||
_const.CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG,
|
_const.CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||||
|
"flexibility_settings",
|
||||||
_const.DEFAULT_BEST_PRICE_MIN_DISTANCE_FROM_AVG,
|
_const.DEFAULT_BEST_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||||
)
|
)
|
||||||
min_period_length = period_settings.get(
|
min_period_length = self._get_option(
|
||||||
_const.CONF_BEST_PRICE_MIN_PERIOD_LENGTH,
|
_const.CONF_BEST_PRICE_MIN_PERIOD_LENGTH,
|
||||||
|
"period_settings",
|
||||||
_const.DEFAULT_BEST_PRICE_MIN_PERIOD_LENGTH,
|
_const.DEFAULT_BEST_PRICE_MIN_PERIOD_LENGTH,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -610,9 +650,10 @@ class TibberPricesPeriodCalculator:
|
||||||
|
|
||||||
# Get relaxation configuration for best price
|
# Get relaxation configuration for best price
|
||||||
# CRITICAL: Relaxation settings are stored in nested section 'relaxation_and_target_periods'
|
# CRITICAL: Relaxation settings are stored in nested section 'relaxation_and_target_periods'
|
||||||
relaxation_and_target_periods = self.config_entry.options.get("relaxation_and_target_periods", {})
|
# Override entities can override any of these values at runtime
|
||||||
enable_relaxation_best = relaxation_and_target_periods.get(
|
enable_relaxation_best = self._get_option(
|
||||||
_const.CONF_ENABLE_MIN_PERIODS_BEST,
|
_const.CONF_ENABLE_MIN_PERIODS_BEST,
|
||||||
|
"relaxation_and_target_periods",
|
||||||
_const.DEFAULT_ENABLE_MIN_PERIODS_BEST,
|
_const.DEFAULT_ENABLE_MIN_PERIODS_BEST,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -623,12 +664,14 @@ class TibberPricesPeriodCalculator:
|
||||||
show_best_price = bool(all_prices)
|
show_best_price = bool(all_prices)
|
||||||
else:
|
else:
|
||||||
show_best_price = self.should_show_periods(price_info, reverse_sort=False) if all_prices else False
|
show_best_price = self.should_show_periods(price_info, reverse_sort=False) if all_prices else False
|
||||||
min_periods_best = relaxation_and_target_periods.get(
|
min_periods_best = self._get_option(
|
||||||
_const.CONF_MIN_PERIODS_BEST,
|
_const.CONF_MIN_PERIODS_BEST,
|
||||||
|
"relaxation_and_target_periods",
|
||||||
_const.DEFAULT_MIN_PERIODS_BEST,
|
_const.DEFAULT_MIN_PERIODS_BEST,
|
||||||
)
|
)
|
||||||
relaxation_attempts_best = relaxation_and_target_periods.get(
|
relaxation_attempts_best = self._get_option(
|
||||||
_const.CONF_RELAXATION_ATTEMPTS_BEST,
|
_const.CONF_RELAXATION_ATTEMPTS_BEST,
|
||||||
|
"relaxation_and_target_periods",
|
||||||
_const.DEFAULT_RELAXATION_ATTEMPTS_BEST,
|
_const.DEFAULT_RELAXATION_ATTEMPTS_BEST,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -637,13 +680,14 @@ class TibberPricesPeriodCalculator:
|
||||||
best_config = self.get_period_config(reverse_sort=False)
|
best_config = self.get_period_config(reverse_sort=False)
|
||||||
# Get level filter configuration from period_settings section
|
# Get level filter configuration from period_settings section
|
||||||
# CRITICAL: max_level and gap_count are stored in nested section 'period_settings'
|
# CRITICAL: max_level and gap_count are stored in nested section 'period_settings'
|
||||||
period_settings = self.config_entry.options.get("period_settings", {})
|
max_level_best = self._get_option(
|
||||||
max_level_best = period_settings.get(
|
|
||||||
_const.CONF_BEST_PRICE_MAX_LEVEL,
|
_const.CONF_BEST_PRICE_MAX_LEVEL,
|
||||||
|
"period_settings",
|
||||||
_const.DEFAULT_BEST_PRICE_MAX_LEVEL,
|
_const.DEFAULT_BEST_PRICE_MAX_LEVEL,
|
||||||
)
|
)
|
||||||
gap_count_best = period_settings.get(
|
gap_count_best = self._get_option(
|
||||||
_const.CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
_const.CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||||
|
"period_settings",
|
||||||
_const.DEFAULT_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
_const.DEFAULT_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||||
)
|
)
|
||||||
best_period_config = TibberPricesPeriodConfig(
|
best_period_config = TibberPricesPeriodConfig(
|
||||||
|
|
@ -687,8 +731,10 @@ class TibberPricesPeriodCalculator:
|
||||||
|
|
||||||
# Get relaxation configuration for peak price
|
# Get relaxation configuration for peak price
|
||||||
# CRITICAL: Relaxation settings are stored in nested section 'relaxation_and_target_periods'
|
# CRITICAL: Relaxation settings are stored in nested section 'relaxation_and_target_periods'
|
||||||
enable_relaxation_peak = relaxation_and_target_periods.get(
|
# Override entities can override any of these values at runtime
|
||||||
|
enable_relaxation_peak = self._get_option(
|
||||||
_const.CONF_ENABLE_MIN_PERIODS_PEAK,
|
_const.CONF_ENABLE_MIN_PERIODS_PEAK,
|
||||||
|
"relaxation_and_target_periods",
|
||||||
_const.DEFAULT_ENABLE_MIN_PERIODS_PEAK,
|
_const.DEFAULT_ENABLE_MIN_PERIODS_PEAK,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -699,12 +745,14 @@ class TibberPricesPeriodCalculator:
|
||||||
show_peak_price = bool(all_prices)
|
show_peak_price = bool(all_prices)
|
||||||
else:
|
else:
|
||||||
show_peak_price = self.should_show_periods(price_info, reverse_sort=True) if all_prices else False
|
show_peak_price = self.should_show_periods(price_info, reverse_sort=True) if all_prices else False
|
||||||
min_periods_peak = relaxation_and_target_periods.get(
|
min_periods_peak = self._get_option(
|
||||||
_const.CONF_MIN_PERIODS_PEAK,
|
_const.CONF_MIN_PERIODS_PEAK,
|
||||||
|
"relaxation_and_target_periods",
|
||||||
_const.DEFAULT_MIN_PERIODS_PEAK,
|
_const.DEFAULT_MIN_PERIODS_PEAK,
|
||||||
)
|
)
|
||||||
relaxation_attempts_peak = relaxation_and_target_periods.get(
|
relaxation_attempts_peak = self._get_option(
|
||||||
_const.CONF_RELAXATION_ATTEMPTS_PEAK,
|
_const.CONF_RELAXATION_ATTEMPTS_PEAK,
|
||||||
|
"relaxation_and_target_periods",
|
||||||
_const.DEFAULT_RELAXATION_ATTEMPTS_PEAK,
|
_const.DEFAULT_RELAXATION_ATTEMPTS_PEAK,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -713,12 +761,14 @@ class TibberPricesPeriodCalculator:
|
||||||
peak_config = self.get_period_config(reverse_sort=True)
|
peak_config = self.get_period_config(reverse_sort=True)
|
||||||
# Get level filter configuration from period_settings section
|
# Get level filter configuration from period_settings section
|
||||||
# CRITICAL: min_level and gap_count are stored in nested section 'period_settings'
|
# CRITICAL: min_level and gap_count are stored in nested section 'period_settings'
|
||||||
min_level_peak = period_settings.get(
|
min_level_peak = self._get_option(
|
||||||
_const.CONF_PEAK_PRICE_MIN_LEVEL,
|
_const.CONF_PEAK_PRICE_MIN_LEVEL,
|
||||||
|
"period_settings",
|
||||||
_const.DEFAULT_PEAK_PRICE_MIN_LEVEL,
|
_const.DEFAULT_PEAK_PRICE_MIN_LEVEL,
|
||||||
)
|
)
|
||||||
gap_count_peak = period_settings.get(
|
gap_count_peak = self._get_option(
|
||||||
_const.CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
_const.CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||||
|
"period_settings",
|
||||||
_const.DEFAULT_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
_const.DEFAULT_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||||
)
|
)
|
||||||
peak_period_config = TibberPricesPeriodConfig(
|
peak_period_config = TibberPricesPeriodConfig(
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,631 @@
|
||||||
|
"""
|
||||||
|
Price data management for the coordinator.
|
||||||
|
|
||||||
|
This module manages all price-related data for the Tibber Prices integration:
|
||||||
|
|
||||||
|
**User Data** (fetched directly via API):
|
||||||
|
- Home metadata (name, address, timezone)
|
||||||
|
- Account info (subscription status)
|
||||||
|
- Currency settings
|
||||||
|
- Refreshed daily (24h interval)
|
||||||
|
|
||||||
|
**Price Data** (fetched via IntervalPool):
|
||||||
|
- Quarter-hourly price intervals
|
||||||
|
- Yesterday/today/tomorrow coverage
|
||||||
|
- The IntervalPool handles actual API fetching, deduplication, and caching
|
||||||
|
- This manager coordinates the data flow and user data refresh
|
||||||
|
|
||||||
|
Data flow:
|
||||||
|
Tibber API → IntervalPool → PriceDataManager → Coordinator → Sensors
|
||||||
|
↑ ↓
|
||||||
|
(actual fetching) (orchestration + user data)
|
||||||
|
|
||||||
|
Note: Price data is NOT cached in this module - IntervalPool is the single
|
||||||
|
source of truth. This module only caches user_data for daily refresh cycle.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from datetime import timedelta
|
||||||
|
from typing import TYPE_CHECKING, Any
|
||||||
|
|
||||||
|
from custom_components.tibber_prices.api import (
|
||||||
|
TibberPricesApiClientAuthenticationError,
|
||||||
|
TibberPricesApiClientCommunicationError,
|
||||||
|
TibberPricesApiClientError,
|
||||||
|
)
|
||||||
|
from homeassistant.exceptions import ConfigEntryAuthFailed
|
||||||
|
from homeassistant.helpers.update_coordinator import UpdateFailed
|
||||||
|
|
||||||
|
from . import cache, helpers
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from collections.abc import Callable
|
||||||
|
from datetime import datetime
|
||||||
|
|
||||||
|
from custom_components.tibber_prices.api import TibberPricesApiClient
|
||||||
|
from custom_components.tibber_prices.interval_pool import TibberPricesIntervalPool
|
||||||
|
|
||||||
|
from .time_service import TibberPricesTimeService
|
||||||
|
|
||||||
|
_LOGGER = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# Hour when Tibber publishes tomorrow's prices (around 13:00 local time)
|
||||||
|
# Before this hour, requesting tomorrow data will always fail → wasted API call
|
||||||
|
TOMORROW_DATA_AVAILABLE_HOUR = 13
|
||||||
|
|
||||||
|
|
||||||
|
class TibberPricesPriceDataManager:
|
||||||
|
"""
|
||||||
|
Manages price and user data for the coordinator.
|
||||||
|
|
||||||
|
Responsibilities:
|
||||||
|
- User data: Fetches directly via API, validates, caches with persistence
|
||||||
|
- Price data: Coordinates with IntervalPool (which does actual API fetching)
|
||||||
|
- Cache management: Loads/stores both data types to HA persistent storage
|
||||||
|
- Update decisions: Determines when fresh data is needed
|
||||||
|
|
||||||
|
Note: Despite the name, this class does NOT do the actual price fetching.
|
||||||
|
The IntervalPool handles API calls, deduplication, and interval management.
|
||||||
|
This class orchestrates WHEN to fetch and processes the results.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__( # noqa: PLR0913
|
||||||
|
self,
|
||||||
|
api: TibberPricesApiClient,
|
||||||
|
store: Any,
|
||||||
|
log_prefix: str,
|
||||||
|
user_update_interval: timedelta,
|
||||||
|
time: TibberPricesTimeService,
|
||||||
|
home_id: str,
|
||||||
|
interval_pool: TibberPricesIntervalPool,
|
||||||
|
) -> None:
|
||||||
|
"""
|
||||||
|
Initialize the price data manager.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
api: API client for direct requests (user data only).
|
||||||
|
store: Home Assistant storage for persistence.
|
||||||
|
log_prefix: Prefix for log messages (e.g., "[Home Name]").
|
||||||
|
user_update_interval: How often to refresh user data (default: 1 day).
|
||||||
|
time: TimeService for time operations.
|
||||||
|
home_id: Home ID this manager is responsible for.
|
||||||
|
interval_pool: IntervalPool for price data (handles actual fetching).
|
||||||
|
|
||||||
|
"""
|
||||||
|
self.api = api
|
||||||
|
self._store = store
|
||||||
|
self._log_prefix = log_prefix
|
||||||
|
self._user_update_interval = user_update_interval
|
||||||
|
self.time: TibberPricesTimeService = time
|
||||||
|
self.home_id = home_id
|
||||||
|
self._interval_pool = interval_pool
|
||||||
|
|
||||||
|
# Cached data (user data only - price data is in IntervalPool)
|
||||||
|
self._cached_user_data: dict[str, Any] | None = None
|
||||||
|
self._last_user_update: datetime | None = None
|
||||||
|
|
||||||
|
def _log(self, level: str, message: str, *args: object, **kwargs: object) -> None:
|
||||||
|
"""Log with coordinator-specific prefix."""
|
||||||
|
prefixed_message = f"{self._log_prefix} {message}"
|
||||||
|
getattr(_LOGGER, level)(prefixed_message, *args, **kwargs)
|
||||||
|
|
||||||
|
async def load_cache(self) -> None:
|
||||||
|
"""Load cached user data from storage (price data is in IntervalPool)."""
|
||||||
|
cache_data = await cache.load_cache(self._store, self._log_prefix, time=self.time)
|
||||||
|
|
||||||
|
self._cached_user_data = cache_data.user_data
|
||||||
|
self._last_user_update = cache_data.last_user_update
|
||||||
|
|
||||||
|
def should_fetch_tomorrow_data(
|
||||||
|
self,
|
||||||
|
current_price_info: list[dict[str, Any]] | None,
|
||||||
|
) -> bool:
|
||||||
|
"""
|
||||||
|
Determine if tomorrow's data should be requested from the API.
|
||||||
|
|
||||||
|
This is the key intelligence that prevents API spam:
|
||||||
|
- Tibber publishes tomorrow's prices around 13:00 each day
|
||||||
|
- Before 13:00, requesting tomorrow data will always fail → wasted API call
|
||||||
|
- If we already have tomorrow data, no need to request it again
|
||||||
|
|
||||||
|
The decision logic:
|
||||||
|
1. Before 13:00 local time → Don't fetch (data not available yet)
|
||||||
|
2. After 13:00 AND tomorrow data already present → Don't fetch (already have it)
|
||||||
|
3. After 13:00 AND tomorrow data missing → Fetch (data should be available)
|
||||||
|
|
||||||
|
Args:
|
||||||
|
current_price_info: List of price intervals from current coordinator data.
|
||||||
|
Used to check if tomorrow data already exists.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if tomorrow data should be requested, False otherwise.
|
||||||
|
|
||||||
|
"""
|
||||||
|
now = self.time.now()
|
||||||
|
now_local = self.time.as_local(now)
|
||||||
|
current_hour = now_local.hour
|
||||||
|
|
||||||
|
# Before TOMORROW_DATA_AVAILABLE_HOUR - tomorrow data not available yet
|
||||||
|
if current_hour < TOMORROW_DATA_AVAILABLE_HOUR:
|
||||||
|
self._log("debug", "Before %d:00 - not requesting tomorrow data", TOMORROW_DATA_AVAILABLE_HOUR)
|
||||||
|
return False
|
||||||
|
|
||||||
|
# After TOMORROW_DATA_AVAILABLE_HOUR - check if we already have tomorrow data
|
||||||
|
if current_price_info:
|
||||||
|
has_tomorrow = self.has_tomorrow_data(current_price_info)
|
||||||
|
if has_tomorrow:
|
||||||
|
self._log(
|
||||||
|
"debug", "After %d:00 but already have tomorrow data - not requesting", TOMORROW_DATA_AVAILABLE_HOUR
|
||||||
|
)
|
||||||
|
return False
|
||||||
|
self._log("debug", "After %d:00 and tomorrow data missing - will request", TOMORROW_DATA_AVAILABLE_HOUR)
|
||||||
|
return True
|
||||||
|
|
||||||
|
# No current data - request tomorrow data if after TOMORROW_DATA_AVAILABLE_HOUR
|
||||||
|
self._log(
|
||||||
|
"debug", "After %d:00 with no current data - will request tomorrow data", TOMORROW_DATA_AVAILABLE_HOUR
|
||||||
|
)
|
||||||
|
return True
|
||||||
|
|
||||||
|
async def store_cache(self, last_midnight_check: datetime | None = None) -> None:
|
||||||
|
"""Store cache data (user metadata only, price data is in IntervalPool)."""
|
||||||
|
cache_data = cache.TibberPricesCacheData(
|
||||||
|
user_data=self._cached_user_data,
|
||||||
|
last_user_update=self._last_user_update,
|
||||||
|
last_midnight_check=last_midnight_check,
|
||||||
|
)
|
||||||
|
await cache.save_cache(self._store, cache_data, self._log_prefix)
|
||||||
|
|
||||||
|
def _validate_user_data(self, user_data: dict, home_id: str) -> bool: # noqa: PLR0911
|
||||||
|
"""
|
||||||
|
Validate user data completeness.
|
||||||
|
|
||||||
|
Rejects incomplete/invalid data from API to prevent caching temporary errors.
|
||||||
|
Currency information is critical - if missing, we cannot safely calculate prices.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
user_data: User data dict from API.
|
||||||
|
home_id: Home ID to validate against.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if data is valid and complete, False otherwise.
|
||||||
|
|
||||||
|
"""
|
||||||
|
if not user_data:
|
||||||
|
self._log("warning", "User data validation failed: Empty data")
|
||||||
|
return False
|
||||||
|
|
||||||
|
viewer = user_data.get("viewer")
|
||||||
|
if not viewer or not isinstance(viewer, dict):
|
||||||
|
self._log("warning", "User data validation failed: Missing or invalid viewer")
|
||||||
|
return False
|
||||||
|
|
||||||
|
homes = viewer.get("homes")
|
||||||
|
if not homes or not isinstance(homes, list) or len(homes) == 0:
|
||||||
|
self._log("warning", "User data validation failed: No homes found")
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Find our home and validate it has required data
|
||||||
|
home_found = False
|
||||||
|
for home in homes:
|
||||||
|
if home.get("id") == home_id:
|
||||||
|
home_found = True
|
||||||
|
|
||||||
|
# Validate home has timezone (required for cursor calculation)
|
||||||
|
if not home.get("timeZone"):
|
||||||
|
self._log("warning", "User data validation failed: Home %s missing timezone", home_id)
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Currency is REQUIRED - we cannot function without it
|
||||||
|
# The currency is nested in currentSubscription.priceInfo.current.currency
|
||||||
|
subscription = home.get("currentSubscription")
|
||||||
|
if not subscription:
|
||||||
|
self._log(
|
||||||
|
"warning",
|
||||||
|
"User data validation failed: Home %s has no active subscription",
|
||||||
|
home_id,
|
||||||
|
)
|
||||||
|
return False
|
||||||
|
|
||||||
|
price_info = subscription.get("priceInfo")
|
||||||
|
if not price_info:
|
||||||
|
self._log(
|
||||||
|
"warning",
|
||||||
|
"User data validation failed: Home %s subscription has no priceInfo",
|
||||||
|
home_id,
|
||||||
|
)
|
||||||
|
return False
|
||||||
|
|
||||||
|
current = price_info.get("current")
|
||||||
|
if not current:
|
||||||
|
self._log(
|
||||||
|
"warning",
|
||||||
|
"User data validation failed: Home %s priceInfo has no current data",
|
||||||
|
home_id,
|
||||||
|
)
|
||||||
|
return False
|
||||||
|
|
||||||
|
currency = current.get("currency")
|
||||||
|
if not currency:
|
||||||
|
self._log(
|
||||||
|
"warning",
|
||||||
|
"User data validation failed: Home %s has no currency",
|
||||||
|
home_id,
|
||||||
|
)
|
||||||
|
return False
|
||||||
|
|
||||||
|
break
|
||||||
|
|
||||||
|
if not home_found:
|
||||||
|
self._log("warning", "User data validation failed: Home %s not found in homes list", home_id)
|
||||||
|
return False
|
||||||
|
|
||||||
|
self._log("debug", "User data validation passed for home %s", home_id)
|
||||||
|
return True
|
||||||
|
|
||||||
|
async def update_user_data_if_needed(self, current_time: datetime) -> bool:
|
||||||
|
"""
|
||||||
|
Update user data if needed (daily check).
|
||||||
|
|
||||||
|
Only accepts complete and valid data. If API returns incomplete data
|
||||||
|
(e.g., during maintenance), keeps existing cached data and retries later.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if user data was updated, False otherwise
|
||||||
|
|
||||||
|
"""
|
||||||
|
if self._last_user_update is None or current_time - self._last_user_update >= self._user_update_interval:
|
||||||
|
try:
|
||||||
|
self._log("debug", "Updating user data")
|
||||||
|
user_data = await self.api.async_get_viewer_details()
|
||||||
|
|
||||||
|
# Validate before caching
|
||||||
|
if not self._validate_user_data(user_data, self.home_id):
|
||||||
|
self._log(
|
||||||
|
"warning",
|
||||||
|
"Rejecting incomplete user data from API - keeping existing cached data",
|
||||||
|
)
|
||||||
|
return False # Keep existing data, don't update timestamp
|
||||||
|
|
||||||
|
# Data is valid, cache it
|
||||||
|
self._cached_user_data = user_data
|
||||||
|
self._last_user_update = current_time
|
||||||
|
self._log("debug", "User data updated successfully")
|
||||||
|
except (
|
||||||
|
TibberPricesApiClientError,
|
||||||
|
TibberPricesApiClientCommunicationError,
|
||||||
|
) as ex:
|
||||||
|
self._log("warning", "Failed to update user data: %s", ex)
|
||||||
|
return False # Update failed
|
||||||
|
else:
|
||||||
|
return True # User data was updated
|
||||||
|
return False # No update needed
|
||||||
|
|
||||||
|
async def fetch_home_data(
|
||||||
|
self,
|
||||||
|
home_id: str,
|
||||||
|
current_time: datetime,
|
||||||
|
*,
|
||||||
|
include_tomorrow: bool = True,
|
||||||
|
) -> tuple[dict[str, Any], bool]:
|
||||||
|
"""
|
||||||
|
Fetch data for a single home via pool.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
home_id: Home ID to fetch data for.
|
||||||
|
current_time: Current time for timestamp in result.
|
||||||
|
include_tomorrow: If True, request tomorrow's data too. If False,
|
||||||
|
only request up to end of today.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Tuple of (data_dict, api_called):
|
||||||
|
- data_dict: Dictionary with timestamp, home_id, price_info, currency.
|
||||||
|
- api_called: True if API was called to fetch missing data.
|
||||||
|
|
||||||
|
"""
|
||||||
|
if not home_id:
|
||||||
|
self._log("warning", "No home ID provided - cannot fetch price data")
|
||||||
|
return (
|
||||||
|
{
|
||||||
|
"timestamp": current_time,
|
||||||
|
"home_id": "",
|
||||||
|
"price_info": [],
|
||||||
|
"currency": "EUR",
|
||||||
|
},
|
||||||
|
False, # No API call made
|
||||||
|
)
|
||||||
|
|
||||||
|
# Ensure we have user_data before fetching price data
|
||||||
|
# This is critical for timezone-aware cursor calculation
|
||||||
|
if not self._cached_user_data:
|
||||||
|
self._log("info", "User data not cached, fetching before price data")
|
||||||
|
try:
|
||||||
|
user_data = await self.api.async_get_viewer_details()
|
||||||
|
|
||||||
|
# Validate data before accepting it (especially on initial setup)
|
||||||
|
if not self._validate_user_data(user_data, self.home_id):
|
||||||
|
msg = "Received incomplete user data from API - cannot proceed with price fetching"
|
||||||
|
self._log("error", msg)
|
||||||
|
raise TibberPricesApiClientError(msg) # noqa: TRY301
|
||||||
|
|
||||||
|
self._cached_user_data = user_data
|
||||||
|
self._last_user_update = current_time
|
||||||
|
except (
|
||||||
|
TibberPricesApiClientError,
|
||||||
|
TibberPricesApiClientCommunicationError,
|
||||||
|
) as ex:
|
||||||
|
msg = f"Failed to fetch user data (required for price fetching): {ex}"
|
||||||
|
self._log("error", msg)
|
||||||
|
raise TibberPricesApiClientError(msg) from ex
|
||||||
|
|
||||||
|
# At this point, _cached_user_data is guaranteed to be not None (checked above)
|
||||||
|
if not self._cached_user_data:
|
||||||
|
msg = "User data unexpectedly None after fetch attempt"
|
||||||
|
raise TibberPricesApiClientError(msg)
|
||||||
|
|
||||||
|
# Retrieve price data via IntervalPool (single source of truth)
|
||||||
|
price_info, api_called = await self._fetch_via_pool(home_id, include_tomorrow=include_tomorrow)
|
||||||
|
|
||||||
|
# Extract currency for this home from user_data
|
||||||
|
currency = self._get_currency_for_home(home_id)
|
||||||
|
|
||||||
|
self._log(
|
||||||
|
"debug",
|
||||||
|
"Successfully fetched data for home %s (%d intervals, api_called=%s)",
|
||||||
|
home_id,
|
||||||
|
len(price_info),
|
||||||
|
api_called,
|
||||||
|
)
|
||||||
|
|
||||||
|
return (
|
||||||
|
{
|
||||||
|
"timestamp": current_time,
|
||||||
|
"home_id": home_id,
|
||||||
|
"price_info": price_info,
|
||||||
|
"currency": currency,
|
||||||
|
},
|
||||||
|
api_called,
|
||||||
|
)
|
||||||
|
|
||||||
|
async def _fetch_via_pool(
|
||||||
|
self,
|
||||||
|
home_id: str,
|
||||||
|
*,
|
||||||
|
include_tomorrow: bool = True,
|
||||||
|
) -> tuple[list[dict[str, Any]], bool]:
|
||||||
|
"""
|
||||||
|
Retrieve price data via IntervalPool.
|
||||||
|
|
||||||
|
The IntervalPool is the single source of truth for price data:
|
||||||
|
- Handles actual API calls to Tibber
|
||||||
|
- Manages deduplication and caching
|
||||||
|
- Provides intervals from day-before-yesterday to end-of-today/tomorrow
|
||||||
|
|
||||||
|
This method delegates to the Pool's get_sensor_data() which returns
|
||||||
|
all relevant intervals for sensor display.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
home_id: Home ID (currently unused, Pool knows its home).
|
||||||
|
include_tomorrow: If True, request tomorrow's data too. If False,
|
||||||
|
only request up to end of today. This prevents
|
||||||
|
API spam before 13:00 when Tibber doesn't have
|
||||||
|
tomorrow data yet.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Tuple of (intervals, api_called):
|
||||||
|
- intervals: List of price interval dicts.
|
||||||
|
- api_called: True if API was called to fetch missing data.
|
||||||
|
|
||||||
|
"""
|
||||||
|
# user_data is guaranteed by fetch_home_data(), but needed for type narrowing
|
||||||
|
if self._cached_user_data is None:
|
||||||
|
return [], False # No data, no API call
|
||||||
|
|
||||||
|
self._log(
|
||||||
|
"debug",
|
||||||
|
"Retrieving price data for home %s via interval pool (include_tomorrow=%s)",
|
||||||
|
home_id,
|
||||||
|
include_tomorrow,
|
||||||
|
)
|
||||||
|
intervals, api_called = await self._interval_pool.get_sensor_data(
|
||||||
|
api_client=self.api,
|
||||||
|
user_data=self._cached_user_data,
|
||||||
|
include_tomorrow=include_tomorrow,
|
||||||
|
)
|
||||||
|
|
||||||
|
return intervals, api_called
|
||||||
|
|
||||||
|
def _get_currency_for_home(self, home_id: str) -> str:
|
||||||
|
"""
|
||||||
|
Get currency for a specific home from cached user_data.
|
||||||
|
|
||||||
|
Note: The cached user_data is validated before storage, so if we have
|
||||||
|
cached data it should contain valid currency. This method extracts
|
||||||
|
the currency from the nested structure.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Currency code (e.g., "EUR", "NOK", "SEK").
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
TibberPricesApiClientError: If currency cannot be determined.
|
||||||
|
|
||||||
|
"""
|
||||||
|
if not self._cached_user_data:
|
||||||
|
msg = "No user data cached - cannot determine currency"
|
||||||
|
self._log("error", msg)
|
||||||
|
raise TibberPricesApiClientError(msg)
|
||||||
|
|
||||||
|
viewer = self._cached_user_data.get("viewer", {})
|
||||||
|
homes = viewer.get("homes", [])
|
||||||
|
|
||||||
|
for home in homes:
|
||||||
|
if home.get("id") == home_id:
|
||||||
|
# Extract currency from nested structure
|
||||||
|
# Use 'or {}' to handle None values (homes without active subscription)
|
||||||
|
subscription = home.get("currentSubscription") or {}
|
||||||
|
price_info = subscription.get("priceInfo") or {}
|
||||||
|
current = price_info.get("current") or {}
|
||||||
|
currency = current.get("currency")
|
||||||
|
|
||||||
|
if not currency:
|
||||||
|
# This should not happen if validation worked correctly
|
||||||
|
msg = f"Home {home_id} has no active subscription - currency unavailable"
|
||||||
|
self._log("error", msg)
|
||||||
|
raise TibberPricesApiClientError(msg)
|
||||||
|
|
||||||
|
self._log("debug", "Extracted currency %s for home %s", currency, home_id)
|
||||||
|
return currency
|
||||||
|
|
||||||
|
# Home not found in cached data - data validation should have caught this
|
||||||
|
msg = f"Home {home_id} not found in user data - data validation failed"
|
||||||
|
self._log("error", msg)
|
||||||
|
raise TibberPricesApiClientError(msg)
|
||||||
|
|
||||||
|
def _check_home_exists(self, home_id: str) -> bool:
|
||||||
|
"""
|
||||||
|
Check if a home ID exists in cached user data.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
home_id: The home ID to check.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if home exists, False otherwise.
|
||||||
|
|
||||||
|
"""
|
||||||
|
if not self._cached_user_data:
|
||||||
|
# No user data yet - assume home exists (will be checked on next update)
|
||||||
|
return True
|
||||||
|
|
||||||
|
viewer = self._cached_user_data.get("viewer", {})
|
||||||
|
homes = viewer.get("homes", [])
|
||||||
|
|
||||||
|
return any(home.get("id") == home_id for home in homes)
|
||||||
|
|
||||||
|
async def handle_main_entry_update(
|
||||||
|
self,
|
||||||
|
current_time: datetime,
|
||||||
|
home_id: str,
|
||||||
|
transform_fn: Callable[[dict[str, Any]], dict[str, Any]],
|
||||||
|
*,
|
||||||
|
current_price_info: list[dict[str, Any]] | None = None,
|
||||||
|
) -> tuple[dict[str, Any], bool]:
|
||||||
|
"""
|
||||||
|
Handle update for main entry - fetch data for this home.
|
||||||
|
|
||||||
|
The IntervalPool is the single source of truth for price data:
|
||||||
|
- It handles API fetching, deduplication, and caching internally
|
||||||
|
- We decide WHEN to fetch tomorrow data (after 13:00, if not already present)
|
||||||
|
- This prevents API spam before 13:00 when Tibber doesn't have tomorrow data
|
||||||
|
|
||||||
|
This method:
|
||||||
|
1. Updates user data if needed (daily)
|
||||||
|
2. Determines if tomorrow data should be requested
|
||||||
|
3. Fetches price data via IntervalPool
|
||||||
|
4. Transforms result for coordinator
|
||||||
|
|
||||||
|
Args:
|
||||||
|
current_time: Current time for update decisions.
|
||||||
|
home_id: Home ID to fetch data for.
|
||||||
|
transform_fn: Function to transform raw data for coordinator.
|
||||||
|
current_price_info: Current price intervals (from coordinator.data["priceInfo"]).
|
||||||
|
Used to check if tomorrow data already exists.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Tuple of (transformed_data, api_called):
|
||||||
|
- transformed_data: Transformed data dict for coordinator.
|
||||||
|
- api_called: True if API was called to fetch missing data.
|
||||||
|
|
||||||
|
"""
|
||||||
|
# Update user data if needed (daily check)
|
||||||
|
user_data_updated = await self.update_user_data_if_needed(current_time)
|
||||||
|
|
||||||
|
# Check if this home still exists in user data after update
|
||||||
|
# This detects when a home was removed from the Tibber account
|
||||||
|
home_exists = self._check_home_exists(home_id)
|
||||||
|
if not home_exists:
|
||||||
|
self._log("warning", "Home ID %s not found in Tibber account", home_id)
|
||||||
|
# Return a special marker in the result that coordinator can check
|
||||||
|
result = transform_fn({})
|
||||||
|
result["_home_not_found"] = True # Special marker for coordinator
|
||||||
|
return result, False # No API call made (home doesn't exist)
|
||||||
|
|
||||||
|
# Determine if we should request tomorrow data
|
||||||
|
include_tomorrow = self.should_fetch_tomorrow_data(current_price_info)
|
||||||
|
|
||||||
|
# Fetch price data via IntervalPool
|
||||||
|
self._log(
|
||||||
|
"debug",
|
||||||
|
"Fetching price data for home %s via interval pool (include_tomorrow=%s)",
|
||||||
|
home_id,
|
||||||
|
include_tomorrow,
|
||||||
|
)
|
||||||
|
raw_data, api_called = await self.fetch_home_data(home_id, current_time, include_tomorrow=include_tomorrow)
|
||||||
|
|
||||||
|
# Parse timestamps immediately after fetch
|
||||||
|
raw_data = helpers.parse_all_timestamps(raw_data, time=self.time)
|
||||||
|
|
||||||
|
# Store user data cache (price data persisted by IntervalPool)
|
||||||
|
if user_data_updated:
|
||||||
|
await self.store_cache()
|
||||||
|
|
||||||
|
# Transform for main entry
|
||||||
|
return transform_fn(raw_data), api_called
|
||||||
|
|
||||||
|
async def handle_api_error(
|
||||||
|
self,
|
||||||
|
error: Exception,
|
||||||
|
) -> None:
|
||||||
|
"""
|
||||||
|
Handle API errors - re-raise appropriate exceptions.
|
||||||
|
|
||||||
|
Note: With IntervalPool as source of truth, there's no local price cache
|
||||||
|
to fall back to. The Pool has its own persistence, so on next update
|
||||||
|
it will use its cached intervals if API is unavailable.
|
||||||
|
"""
|
||||||
|
if isinstance(error, TibberPricesApiClientAuthenticationError):
|
||||||
|
msg = "Invalid access token"
|
||||||
|
raise ConfigEntryAuthFailed(msg) from error
|
||||||
|
|
||||||
|
msg = f"Error communicating with API: {error}"
|
||||||
|
raise UpdateFailed(msg) from error
|
||||||
|
|
||||||
|
@property
|
||||||
|
def cached_user_data(self) -> dict[str, Any] | None:
|
||||||
|
"""Get cached user data."""
|
||||||
|
return self._cached_user_data
|
||||||
|
|
||||||
|
def has_tomorrow_data(self, price_info: list[dict[str, Any]]) -> bool:
|
||||||
|
"""
|
||||||
|
Check if tomorrow's price data is available.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
price_info: List of price intervals from coordinator data.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if at least one interval from tomorrow is present.
|
||||||
|
|
||||||
|
"""
|
||||||
|
if not price_info:
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Get tomorrow's date
|
||||||
|
now = self.time.now()
|
||||||
|
tomorrow = (self.time.as_local(now) + timedelta(days=1)).date()
|
||||||
|
|
||||||
|
# Check if any interval is from tomorrow
|
||||||
|
for interval in price_info:
|
||||||
|
if "startsAt" not in interval:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# startsAt is already a datetime object after _transform_data()
|
||||||
|
interval_time = interval["startsAt"]
|
||||||
|
if isinstance(interval_time, str):
|
||||||
|
# Fallback: parse if still string (shouldn't happen with transformed data)
|
||||||
|
interval_time = self.time.parse_datetime(interval_time)
|
||||||
|
|
||||||
|
if interval_time and self.time.as_local(interval_time).date() == tomorrow:
|
||||||
|
return True
|
||||||
|
|
||||||
|
return False
|
||||||
|
|
@ -2,7 +2,9 @@
|
||||||
"apexcharts": {
|
"apexcharts": {
|
||||||
"title_rating_level": "Preisphasen Tagesverlauf",
|
"title_rating_level": "Preisphasen Tagesverlauf",
|
||||||
"title_level": "Preisniveau",
|
"title_level": "Preisniveau",
|
||||||
"best_price_period_name": "Beste Preisperiode",
|
"hourly_suffix": "(Ø stündlich)",
|
||||||
|
"best_price_period_name": "Bestpreis-Zeitraum",
|
||||||
|
"peak_price_period_name": "Spitzenpreis-Zeitraum",
|
||||||
"notification": {
|
"notification": {
|
||||||
"metadata_sensor_unavailable": {
|
"metadata_sensor_unavailable": {
|
||||||
"title": "Tibber Prices: ApexCharts YAML mit eingeschränkter Funktionalität generiert",
|
"title": "Tibber Prices: ApexCharts YAML mit eingeschränkter Funktionalität generiert",
|
||||||
|
|
@ -290,24 +292,24 @@
|
||||||
"long_description": "Zeigt den Zeitstempel des letzten verfügbaren Preisdatenintervalls von deinem Tibber-Abonnement"
|
"long_description": "Zeigt den Zeitstempel des letzten verfügbaren Preisdatenintervalls von deinem Tibber-Abonnement"
|
||||||
},
|
},
|
||||||
"today_volatility": {
|
"today_volatility": {
|
||||||
"description": "Preisvolatilitätsklassifizierung für heute",
|
"description": "Wie stark sich die Strompreise heute verändern",
|
||||||
"long_description": "Zeigt, wie stark die Strompreise im Laufe des heutigen Tages variieren, basierend auf der Spannweite (Differenz zwischen höchstem und niedrigstem Preis). Klassifizierung: niedrig = Spannweite < 5ct, moderat = 5-15ct, hoch = 15-30ct, sehr hoch = >30ct.",
|
"long_description": "Zeigt, ob die heutigen Preise stabil bleiben oder stark schwanken. Niedrige Volatilität bedeutet recht konstante Preise – Timing ist kaum wichtig. Hohe Volatilität bedeutet spürbare Preisunterschiede über den Tag – gute Chance, den Verbrauch auf günstigere Zeiten zu verschieben. `price_coefficient_variation_%` zeigt den Prozentwert, `price_spread` die absolute Preisspanne.",
|
||||||
"usage_tips": "Verwende dies, um zu entscheiden, ob preisbasierte Optimierung lohnenswert ist. Zum Beispiel lohnt sich bei einer Balkonbatterie mit 15% Effizienzverlusten die Optimierung nur, wenn die Volatilität mindestens moderat ist. Erstelle Automatisierungen, die die Volatilität prüfen, bevor Lade-/Entladezyklen geplant werden."
|
"usage_tips": "Nutze dies, um zu entscheiden, ob Optimierung sich lohnt. Bei niedriger Volatilität kannst du Geräte jederzeit laufen lassen. Bei hoher Volatilität sparst du spürbar, wenn du Best-Price-Perioden nutzt."
|
||||||
},
|
},
|
||||||
"tomorrow_volatility": {
|
"tomorrow_volatility": {
|
||||||
"description": "Preisvolatilitätsklassifizierung für morgen",
|
"description": "Wie stark sich die Strompreise morgen verändern werden",
|
||||||
"long_description": "Zeigt, wie stark die Strompreise im Laufe des morgigen Tages variieren werden, basierend auf der Spannweite (Differenz zwischen höchstem und niedrigstem Preis). Wird nicht verfügbar, bis morgige Daten veröffentlicht sind (typischerweise 13:00-14:00 MEZ).",
|
"long_description": "Zeigt, ob die Preise morgen stabil bleiben oder stark schwanken. Verfügbar, sobald die morgigen Daten veröffentlicht sind (typischerweise 13:00–14:00 MEZ). Niedrige Volatilität bedeutet recht konstante Preise – Timing ist nicht kritisch. Hohe Volatilität bedeutet deutliche Preisunterschiede über den Tag – gute Gelegenheit, energieintensive Aufgaben zu planen. `price_coefficient_variation_%` zeigt den Prozentwert, `price_spread` die absolute Preisspanne.",
|
||||||
"usage_tips": "Verwende dies zur Vorausplanung des morgigen Energieverbrauchs. Bei hoher oder sehr hoher Volatilität morgen lohnt sich die Optimierung des Energieverbrauchs. Bei niedriger Volatilität kannst du Geräte jederzeit ohne wesentliche Kostenunterschiede betreiben."
|
"usage_tips": "Nutze dies für die Planung des morgigen Energieverbrauchs. Hohe Volatilität? Plane flexible Lasten in Best-Price-Perioden. Niedrige Volatilität? Lass Geräte laufen, wann es dir passt."
|
||||||
},
|
},
|
||||||
"next_24h_volatility": {
|
"next_24h_volatility": {
|
||||||
"description": "Preisvolatilitätsklassifizierung für die rollierenden nächsten 24 Stunden",
|
"description": "Wie stark sich die Preise in den nächsten 24 Stunden verändern",
|
||||||
"long_description": "Zeigt, wie stark die Strompreise in den nächsten 24 Stunden ab jetzt variieren (rollierendes Fenster). Dies überschreitet Tagesgrenzen und aktualisiert sich alle 15 Minuten, wodurch eine vorausschauende Volatilitätsbewertung unabhängig von Kalendertagen bereitgestellt wird.",
|
"long_description": "Zeigt die Preisvolatilität für ein rollierendes 24-Stunden-Fenster ab jetzt (aktualisiert alle 15 Minuten). Niedrige Volatilität bedeutet recht konstante Preise. Hohe Volatilität bedeutet spürbare Preisschwankungen und damit Chancen zur Optimierung. Im Unterschied zu Heute/Morgen-Sensoren überschreitet dieser Tagesgrenzen und liefert eine durchgängige Vorhersage. `price_coefficient_variation_%` zeigt den Prozentwert, `price_spread` die absolute Preisspanne.",
|
||||||
"usage_tips": "Bester Sensor für Echtzeitoptimierungsentscheidungen. Im Gegensatz zu Heute/Morgen-Sensoren, die um Mitternacht wechseln, bietet dies eine kontinuierliche 24h-Volatilitätsbewertung. Verwende dies für Batterielade-Strategien, die Tagesgrenzen überschreiten."
|
"usage_tips": "Am besten für Entscheidungen in Echtzeit. Nutze dies für Batterieladestrategien oder andere flexible Lasten, die über Mitternacht laufen könnten. Bietet eine konsistente 24h-Perspektive unabhängig vom Kalendertag."
|
||||||
},
|
},
|
||||||
"today_tomorrow_volatility": {
|
"today_tomorrow_volatility": {
|
||||||
"description": "Kombinierte Preisvolatilitätsklassifizierung für heute und morgen",
|
"description": "Kombinierte Preisvolatilität für heute und morgen",
|
||||||
"long_description": "Zeigt die Volatilität über heute und morgen zusammen (wenn morgige Daten verfügbar sind). Bietet eine erweiterte Ansicht der Preisvariation über bis zu 48 Stunden. Fällt auf Nur-Heute zurück, wenn morgige Daten noch nicht verfügbar sind.",
|
"long_description": "Zeigt die Gesamtvolatilität, wenn heute und morgen gemeinsam betrachtet werden (sobald die morgigen Daten verfügbar sind). Zeigt, ob über die Tagesgrenze hinweg deutliche Preisunterschiede bestehen. Fällt auf nur-heute zurück, wenn morgige Daten noch fehlen. Hilfreich für mehrtägige Optimierung. `price_coefficient_variation_%` zeigt den Prozentwert, `price_spread` die absolute Preisspanne.",
|
||||||
"usage_tips": "Verwende dies für Mehrtagsplanung und um zu verstehen, ob Preismöglichkeiten über die Tagesgrenze hinweg bestehen. Die Attribute 'today_volatility' und 'tomorrow_volatility' zeigen individuelle Tagesbeiträge. Nützlich für die Planung von Ladesitzungen, die Mitternacht überschreiten könnten."
|
"usage_tips": "Nutze dies für Aufgaben, die sich über mehrere Tage erstrecken. Prüfe, ob die Preisunterschiede groß genug für eine Planung sind. Die einzelnen Tages-Sensoren zeigen die Beiträge pro Tag, falls du mehr Details brauchst."
|
||||||
},
|
},
|
||||||
"data_lifecycle_status": {
|
"data_lifecycle_status": {
|
||||||
"description": "Aktueller Status des Preisdaten-Lebenszyklus und der Zwischenspeicherung",
|
"description": "Aktueller Status des Preisdaten-Lebenszyklus und der Zwischenspeicherung",
|
||||||
|
|
@ -320,14 +322,14 @@
|
||||||
"usage_tips": "Nutze dies, um einen Countdown wie 'Günstiger Zeitraum endet in 2 Stunden' (wenn aktiv) oder 'Nächster günstiger Zeitraum endet um 14:00' (wenn inaktiv) anzuzeigen. Home Assistant zeigt automatisch relative Zeit für Zeitstempel-Sensoren an."
|
"usage_tips": "Nutze dies, um einen Countdown wie 'Günstiger Zeitraum endet in 2 Stunden' (wenn aktiv) oder 'Nächster günstiger Zeitraum endet um 14:00' (wenn inaktiv) anzuzeigen. Home Assistant zeigt automatisch relative Zeit für Zeitstempel-Sensoren an."
|
||||||
},
|
},
|
||||||
"best_price_period_duration": {
|
"best_price_period_duration": {
|
||||||
"description": "Gesamtlänge des aktuellen oder nächsten günstigen Zeitraums in Minuten",
|
"description": "Gesamtlänge des aktuellen oder nächsten günstigen Zeitraums",
|
||||||
"long_description": "Zeigt, wie lange der günstige Zeitraum insgesamt dauert. Während eines aktiven Zeitraums zeigt dies die Dauer des aktuellen Zeitraums. Wenn kein Zeitraum aktiv ist, zeigt dies die Dauer des nächsten kommenden Zeitraums. Gibt nur 'Unbekannt' zurück, wenn keine Zeiträume ermittelt wurden.",
|
"long_description": "Zeigt, wie lange der günstige Zeitraum insgesamt dauert. Der State wird in Stunden angezeigt (z. B. 1,5 h) für eine einfache Lesbarkeit in der UI, während das Attribut `period_duration_minutes` denselben Wert in Minuten bereitstellt (z. B. 90) für Automationen. Während eines aktiven Zeitraums zeigt dies die Dauer des aktuellen Zeitraums. Wenn kein Zeitraum aktiv ist, zeigt dies die Dauer des nächsten kommenden Zeitraums. Gibt nur 'Unbekannt' zurück, wenn keine Zeiträume ermittelt wurden.",
|
||||||
"usage_tips": "Nützlich für Planung: 'Der nächste günstige Zeitraum dauert 90 Minuten' oder 'Der aktuelle günstige Zeitraum ist 120 Minuten lang'. Kombiniere mit remaining_minutes, um zu berechnen, wann langlaufende Geräte gestartet werden sollten."
|
"usage_tips": "Für Anzeige: State-Wert (Stunden) in Dashboards nutzen. Für Automationen: Attribut `period_duration_minutes` verwenden, um zu prüfen, ob genug Zeit für langläufige Geräte ist (z. B. 'Wenn period_duration_minutes >= 90, starte Waschmaschine')."
|
||||||
},
|
},
|
||||||
"best_price_remaining_minutes": {
|
"best_price_remaining_minutes": {
|
||||||
"description": "Verbleibende Minuten im aktuellen günstigen Zeitraum (0 wenn inaktiv)",
|
"description": "Verbleibende Zeit im aktuellen günstigen Zeitraum",
|
||||||
"long_description": "Zeigt, wie viele Minuten im aktuellen günstigen Zeitraum noch verbleiben. Gibt 0 zurück, wenn kein Zeitraum aktiv ist. Aktualisiert sich jede Minute. Prüfe binary_sensor.best_price_period, um zu sehen, ob ein Zeitraum aktuell aktiv ist.",
|
"long_description": "Zeigt, wie viel Zeit im aktuellen günstigen Zeitraum noch verbleibt. Der State wird in Stunden angezeigt (z. B. 0,5 h) für eine einfache Lesbarkeit, während das Attribut `remaining_minutes` Minuten bereitstellt (z. B. 30) für Automationslogik. Gibt 0 zurück, wenn kein Zeitraum aktiv ist. Aktualisiert sich jede Minute. Prüfe binary_sensor.best_price_period, um zu sehen, ob ein Zeitraum aktuell aktiv ist.",
|
||||||
"usage_tips": "Perfekt für Automatisierungen: 'Wenn remaining_minutes > 0 UND remaining_minutes < 30, starte Waschmaschine jetzt'. Der Wert 0 macht es einfach zu prüfen, ob ein Zeitraum aktiv ist (Wert > 0) oder nicht (Wert = 0)."
|
"usage_tips": "Für Automationen: Attribut `remaining_minutes` mit numerischen Vergleichen nutzen wie 'Wenn remaining_minutes > 0 UND remaining_minutes < 30, starte Waschmaschine jetzt'. Der Wert 0 macht es einfach zu prüfen, ob ein Zeitraum aktiv ist (Wert > 0) oder nicht (Wert = 0)."
|
||||||
},
|
},
|
||||||
"best_price_progress": {
|
"best_price_progress": {
|
||||||
"description": "Fortschritt durch aktuellen günstigen Zeitraum (0% wenn inaktiv)",
|
"description": "Fortschritt durch aktuellen günstigen Zeitraum (0% wenn inaktiv)",
|
||||||
|
|
@ -340,9 +342,9 @@
|
||||||
"usage_tips": "Immer nützlich für Vorausplanung: 'Nächster günstiger Zeitraum startet in 3 Stunden' (egal, ob du gerade in einem Zeitraum bist oder nicht). Kombiniere mit Automatisierungen: 'Wenn nächste Startzeit in 10 Minuten ist, sende Benachrichtigung zur Vorbereitung der Waschmaschine'."
|
"usage_tips": "Immer nützlich für Vorausplanung: 'Nächster günstiger Zeitraum startet in 3 Stunden' (egal, ob du gerade in einem Zeitraum bist oder nicht). Kombiniere mit Automatisierungen: 'Wenn nächste Startzeit in 10 Minuten ist, sende Benachrichtigung zur Vorbereitung der Waschmaschine'."
|
||||||
},
|
},
|
||||||
"best_price_next_in_minutes": {
|
"best_price_next_in_minutes": {
|
||||||
"description": "Minuten bis nächster günstiger Zeitraum startet (0 beim Übergang)",
|
"description": "Zeit bis zum nächsten günstigen Zeitraum",
|
||||||
"long_description": "Zeigt Minuten bis der nächste günstige Zeitraum startet. Während eines aktiven Zeitraums zeigt dies die Zeit bis zum Zeitraum nach dem aktuellen. Gibt 0 während kurzer Übergangsphasen zurück. Aktualisiert sich jede Minute.",
|
"long_description": "Zeigt, wie lange es bis zum nächsten günstigen Zeitraum dauert. Der State wird in Stunden angezeigt (z. B. 2,25 h) für Dashboards, während das Attribut `next_in_minutes` Minuten bereitstellt (z. B. 135) für Automationsbedingungen. Während eines aktiven Zeitraums zeigt dies die Zeit bis zum Zeitraum nach dem aktuellen. Gibt 0 während kurzer Übergangsphasen zurück. Aktualisiert sich jede Minute.",
|
||||||
"usage_tips": "Perfekt für 'warte bis günstiger Zeitraum' Automatisierungen: 'Wenn next_in_minutes > 0 UND next_in_minutes < 15, warte, bevor du die Geschirrspülmaschine startest'. Wert > 0 zeigt immer an, dass ein zukünftiger Zeitraum geplant ist."
|
"usage_tips": "Für Automationen: Attribut `next_in_minutes` nutzen wie 'Wenn next_in_minutes > 0 UND next_in_minutes < 15, warte, bevor du die Geschirrspülmaschine startest'. Wert > 0 zeigt immer an, dass ein zukünftiger Zeitraum geplant ist."
|
||||||
},
|
},
|
||||||
"peak_price_end_time": {
|
"peak_price_end_time": {
|
||||||
"description": "Wann der aktuelle oder nächste teure Zeitraum endet",
|
"description": "Wann der aktuelle oder nächste teure Zeitraum endet",
|
||||||
|
|
@ -350,14 +352,14 @@
|
||||||
"usage_tips": "Nutze dies, um 'Teurer Zeitraum endet in 1 Stunde' (wenn aktiv) oder 'Nächster teurer Zeitraum endet um 18:00' (wenn inaktiv) anzuzeigen. Kombiniere mit Automatisierungen, um den Betrieb nach der Spitzenzeit fortzusetzen."
|
"usage_tips": "Nutze dies, um 'Teurer Zeitraum endet in 1 Stunde' (wenn aktiv) oder 'Nächster teurer Zeitraum endet um 18:00' (wenn inaktiv) anzuzeigen. Kombiniere mit Automatisierungen, um den Betrieb nach der Spitzenzeit fortzusetzen."
|
||||||
},
|
},
|
||||||
"peak_price_period_duration": {
|
"peak_price_period_duration": {
|
||||||
"description": "Gesamtlänge des aktuellen oder nächsten teuren Zeitraums in Minuten",
|
"description": "Länge des aktuellen/nächsten teuren Zeitraums",
|
||||||
"long_description": "Zeigt, wie lange der teure Zeitraum insgesamt dauert. Während eines aktiven Zeitraums zeigt dies die Dauer des aktuellen Zeitraums. Wenn kein Zeitraum aktiv ist, zeigt dies die Dauer des nächsten kommenden Zeitraums. Gibt nur 'Unbekannt' zurück, wenn keine Zeiträume ermittelt wurden.",
|
"long_description": "Gesamtdauer des aktuellen oder nächsten teuren Zeitraums. Der State wird in Stunden angezeigt (z. B. 1,5 h) für leichtes Ablesen in der UI, während das Attribut `period_duration_minutes` denselben Wert in Minuten bereitstellt (z. B. 90) für Automationen. Dieser Wert repräsentiert die **volle geplante Dauer** des Zeitraums und ist konstant während des gesamten Zeitraums, auch wenn die verbleibende Zeit (remaining_minutes) abnimmt.",
|
||||||
"usage_tips": "Nützlich für Planung: 'Der nächste teure Zeitraum dauert 60 Minuten' oder 'Der aktuelle Spitzenzeitraum ist 90 Minuten lang'. Kombiniere mit remaining_minutes, um zu entscheiden, ob die Spitze abgewartet oder der Betrieb fortgesetzt werden soll."
|
"usage_tips": "Kombiniere mit remaining_minutes, um zu berechnen, wann langlaufende Geräte gestoppt werden sollen: Zeitraum begann vor `period_duration_minutes - remaining_minutes` Minuten. Dieses Attribut unterstützt Energiespar-Strategien, indem es hilft, Hochverbrauchsaktivitäten außerhalb teurer Perioden zu planen."
|
||||||
},
|
},
|
||||||
"peak_price_remaining_minutes": {
|
"peak_price_remaining_minutes": {
|
||||||
"description": "Verbleibende Minuten im aktuellen teuren Zeitraum (0 wenn inaktiv)",
|
"description": "Verbleibende Zeit im aktuellen teuren Zeitraum",
|
||||||
"long_description": "Zeigt, wie viele Minuten im aktuellen teuren Zeitraum noch verbleiben. Gibt 0 zurück, wenn kein Zeitraum aktiv ist. Aktualisiert sich jede Minute. Prüfe binary_sensor.peak_price_period, um zu sehen, ob ein Zeitraum aktuell aktiv ist.",
|
"long_description": "Zeigt, wie viel Zeit im aktuellen teuren Zeitraum noch verbleibt. Der State wird in Stunden angezeigt (z. B. 0,75 h) für einfaches Ablesen in Dashboards, während das Attribut `remaining_minutes` dieselbe Zeit in Minuten liefert (z. B. 45) für Automationsbedingungen. **Countdown-Timer**: Dieser Wert dekrementiert jede Minute während eines aktiven Zeitraums. Gibt 0 zurück, wenn kein teurer Zeitraum aktiv ist. Aktualisiert sich minütlich.",
|
||||||
"usage_tips": "Nutze in Automatisierungen: 'Wenn remaining_minutes > 60, breche aufgeschobene Ladesitzung ab'. Wert 0 macht es einfach zu unterscheiden zwischen aktivem (Wert > 0) und inaktivem (Wert = 0) Zeitraum."
|
"usage_tips": "Für Automationen: Nutze Attribut `remaining_minutes` wie 'Wenn remaining_minutes > 60, setze Heizung auf Energiesparmodus' oder 'Wenn remaining_minutes < 15, erhöhe Temperatur wieder'. UI zeigt benutzerfreundliche Stunden (z. B. 1,25 h). Wert 0 zeigt an, dass kein teurer Zeitraum aktiv ist."
|
||||||
},
|
},
|
||||||
"peak_price_progress": {
|
"peak_price_progress": {
|
||||||
"description": "Fortschritt durch aktuellen teuren Zeitraum (0% wenn inaktiv)",
|
"description": "Fortschritt durch aktuellen teuren Zeitraum (0% wenn inaktiv)",
|
||||||
|
|
@ -370,9 +372,9 @@
|
||||||
"usage_tips": "Immer nützlich für Planung: 'Nächster teurer Zeitraum startet in 2 Stunden'. Automatisierung: 'Wenn nächste Startzeit in 30 Minuten ist, reduziere Heiztemperatur vorsorglich'."
|
"usage_tips": "Immer nützlich für Planung: 'Nächster teurer Zeitraum startet in 2 Stunden'. Automatisierung: 'Wenn nächste Startzeit in 30 Minuten ist, reduziere Heiztemperatur vorsorglich'."
|
||||||
},
|
},
|
||||||
"peak_price_next_in_minutes": {
|
"peak_price_next_in_minutes": {
|
||||||
"description": "Minuten bis nächster teurer Zeitraum startet (0 beim Übergang)",
|
"description": "Zeit bis zum nächsten teuren Zeitraum",
|
||||||
"long_description": "Zeigt Minuten bis der nächste teure Zeitraum startet. Während eines aktiven Zeitraums zeigt dies die Zeit bis zum Zeitraum nach dem aktuellen. Gibt 0 während kurzer Übergangsphasen zurück. Aktualisiert sich jede Minute.",
|
"long_description": "Zeigt, wie lange es bis zum nächsten teuren Zeitraum dauert. Der State wird in Stunden angezeigt (z. B. 2,25 h) für Dashboards, während das Attribut `next_in_minutes` Minuten bereitstellt (z. B. 135) für Automationsbedingungen. Während eines aktiven Zeitraums zeigt dies die Zeit bis zum Zeitraum nach dem aktuellen. Gibt 0 während kurzer Übergangsphasen zurück. Aktualisiert sich jede Minute.",
|
||||||
"usage_tips": "Präventive Automatisierung: 'Wenn next_in_minutes > 0 UND next_in_minutes < 10, beende aktuellen Ladezyklus jetzt, bevor die Preise steigen'."
|
"usage_tips": "Für Automationen: Attribut `next_in_minutes` nutzen wie 'Wenn next_in_minutes > 0 UND next_in_minutes < 10, reduziere Heizung vorsorglich bevor der teure Zeitraum beginnt'. Wert > 0 zeigt immer an, dass ein zukünftiger teurer Zeitraum geplant ist."
|
||||||
},
|
},
|
||||||
"home_type": {
|
"home_type": {
|
||||||
"description": "Art der Wohnung (Wohnung, Haus usw.)",
|
"description": "Art der Wohnung (Wohnung, Haus usw.)",
|
||||||
|
|
@ -487,6 +489,80 @@
|
||||||
"usage_tips": "Verwende dies, um zu überprüfen, ob Echtzeit-Verbrauchsdaten verfügbar sind. Aktiviere Benachrichtigungen, falls dies unerwartet auf 'Aus' wechselt, was auf potenzielle Hardware- oder Verbindungsprobleme hinweist."
|
"usage_tips": "Verwende dies, um zu überprüfen, ob Echtzeit-Verbrauchsdaten verfügbar sind. Aktiviere Benachrichtigungen, falls dies unerwartet auf 'Aus' wechselt, was auf potenzielle Hardware- oder Verbindungsprobleme hinweist."
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"number": {
|
||||||
|
"best_price_flex_override": {
|
||||||
|
"description": "Maximaler Prozentsatz über dem Tagesminimumpreis, den Intervalle haben können und trotzdem als 'Bestpreis' gelten. Empfohlen: 15-20 mit Lockerung aktiviert (Standard), oder 25-35 ohne Lockerung. Maximum: 50 (Obergrenze für zuverlässige Periodenerkennung).",
|
||||||
|
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Flexibilität' aus dem Optionen-Dialog für die Bestpreis-Periodenberechnung.",
|
||||||
|
"usage_tips": "Aktiviere diese Entität, um die Bestpreiserkennung dynamisch über Automatisierungen anzupassen, z.B. höhere Flexibilität bei kritischen Lasten oder engere Anforderungen für flexible Geräte."
|
||||||
|
},
|
||||||
|
"best_price_min_distance_override": {
|
||||||
|
"description": "Minimaler prozentualer Abstand unter dem Tagesdurchschnitt. Intervalle müssen so weit unter dem Durchschnitt liegen, um als 'Bestpreis' zu gelten. Hilft, echte Niedrigpreis-Perioden von durchschnittlichen Preisen zu unterscheiden.",
|
||||||
|
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestabstand' aus dem Optionen-Dialog für die Bestpreis-Periodenberechnung.",
|
||||||
|
"usage_tips": "Erhöhe den Wert, wenn du strengere Bestpreis-Kriterien möchtest. Verringere ihn, wenn zu wenige Perioden erkannt werden."
|
||||||
|
},
|
||||||
|
"best_price_min_period_length_override": {
|
||||||
|
"description": "Minimale Periodenl\u00e4nge in 15-Minuten-Intervallen. Perioden kürzer als diese werden nicht gemeldet. Beispiel: 2 = mindestens 30 Minuten.",
|
||||||
|
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestperiodenlänge' aus dem Optionen-Dialog für die Bestpreis-Periodenberechnung.",
|
||||||
|
"usage_tips": "Passe an die typische Laufzeit deiner Geräte an: 2 (30 Min) für Schnellprogramme, 4-8 (1-2 Std) für normale Zyklen, 8+ für lange ECO-Programme."
|
||||||
|
},
|
||||||
|
"best_price_min_periods_override": {
|
||||||
|
"description": "Minimale Anzahl an Bestpreis-Perioden, die täglich gefunden werden sollen. Wenn Lockerung aktiviert ist, wird das System die Kriterien automatisch anpassen, um diese Zahl zu erreichen.",
|
||||||
|
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestperioden' aus dem Optionen-Dialog für die Bestpreis-Periodenberechnung.",
|
||||||
|
"usage_tips": "Setze dies auf die Anzahl zeitkritischer Aufgaben, die du täglich hast. Beispiel: 2 für zwei Waschmaschinenladungen."
|
||||||
|
},
|
||||||
|
"best_price_relaxation_attempts_override": {
|
||||||
|
"description": "Anzahl der Versuche, die Kriterien schrittweise zu lockern, um die Mindestperiodenanzahl zu erreichen. Jeder Versuch erhöht die Flexibilität um 3 Prozent. Bei 0 werden nur Basis-Kriterien verwendet.",
|
||||||
|
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Lockerungsversuche' aus dem Optionen-Dialog für die Bestpreis-Periodenberechnung.",
|
||||||
|
"usage_tips": "Höhere Werte machen die Periodenerkennung anpassungsfähiger an Tage mit stabilen Preisen. Setze auf 0, um strenge Kriterien ohne Lockerung zu erzwingen."
|
||||||
|
},
|
||||||
|
"best_price_gap_count_override": {
|
||||||
|
"description": "Maximale Anzahl teurerer Intervalle, die zwischen günstigen Intervallen erlaubt sind und trotzdem als eine zusammenhängende Periode gelten. Bei 0 müssen günstige Intervalle aufeinander folgen.",
|
||||||
|
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Lückentoleranz' aus dem Optionen-Dialog für die Bestpreis-Periodenberechnung.",
|
||||||
|
"usage_tips": "Erhöhe dies für Geräte mit variabler Last (z.B. Wärmepumpen), die kurze teurere Intervalle tolerieren können. Setze auf 0 für kontinuierliche günstige Perioden."
|
||||||
|
},
|
||||||
|
"peak_price_flex_override": {
|
||||||
|
"description": "Maximaler Prozentsatz unter dem Tagesmaximumpreis, den Intervalle haben können und trotzdem als 'Spitzenpreis' gelten. Gleiche Empfehlungen wie für Bestpreis-Flexibilität.",
|
||||||
|
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Flexibilität' aus dem Optionen-Dialog für die Spitzenpreis-Periodenberechnung.",
|
||||||
|
"usage_tips": "Nutze dies, um den Spitzenpreis-Schwellenwert zur Laufzeit für Automatisierungen anzupassen, die den Verbrauch während teurer Stunden vermeiden."
|
||||||
|
},
|
||||||
|
"peak_price_min_distance_override": {
|
||||||
|
"description": "Minimaler prozentualer Abstand über dem Tagesdurchschnitt. Intervalle müssen so weit über dem Durchschnitt liegen, um als 'Spitzenpreis' zu gelten.",
|
||||||
|
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestabstand' aus dem Optionen-Dialog für die Spitzenpreis-Periodenberechnung.",
|
||||||
|
"usage_tips": "Erhöhe den Wert, um nur extreme Preisspitzen zu erfassen. Verringere ihn, um mehr Hochpreiszeiten einzubeziehen."
|
||||||
|
},
|
||||||
|
"peak_price_min_period_length_override": {
|
||||||
|
"description": "Minimale Periodenl\u00e4nge in 15-Minuten-Intervallen für Spitzenpreise. Kürzere Preisspitzen werden nicht als Perioden gemeldet.",
|
||||||
|
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestperiodenlänge' aus dem Optionen-Dialog für die Spitzenpreis-Periodenberechnung.",
|
||||||
|
"usage_tips": "Kürzere Werte erfassen kurze Preisspitzen. Längere Werte fokussieren auf anhaltende Hochpreisphasen."
|
||||||
|
},
|
||||||
|
"peak_price_min_periods_override": {
|
||||||
|
"description": "Minimale Anzahl an Spitzenpreis-Perioden, die täglich gefunden werden sollen.",
|
||||||
|
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestperioden' aus dem Optionen-Dialog für die Spitzenpreis-Periodenberechnung.",
|
||||||
|
"usage_tips": "Setze dies basierend darauf, wie viele Hochpreisphasen du pro Tag für Automatisierungen erfassen möchtest."
|
||||||
|
},
|
||||||
|
"peak_price_relaxation_attempts_override": {
|
||||||
|
"description": "Anzahl der Versuche, die Kriterien zu lockern, um die Mindestanzahl an Spitzenpreis-Perioden zu erreichen.",
|
||||||
|
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Lockerungsversuche' aus dem Optionen-Dialog für die Spitzenpreis-Periodenberechnung.",
|
||||||
|
"usage_tips": "Erhöhe dies, wenn an Tagen mit stabilen Preisen keine Perioden gefunden werden. Setze auf 0, um strenge Kriterien zu erzwingen."
|
||||||
|
},
|
||||||
|
"peak_price_gap_count_override": {
|
||||||
|
"description": "Maximale Anzahl günstigerer Intervalle, die zwischen teuren Intervallen erlaubt sind und trotzdem als eine Spitzenpreis-Periode gelten.",
|
||||||
|
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Lückentoleranz' aus dem Optionen-Dialog für die Spitzenpreis-Periodenberechnung.",
|
||||||
|
"usage_tips": "Höhere Werte erfassen längere Hochpreisphasen auch mit kurzen Preiseinbrüchen. Setze auf 0, um strikt zusammenhängende Spitzenpreise zu erfassen."
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"switch": {
|
||||||
|
"best_price_enable_relaxation_override": {
|
||||||
|
"description": "Wenn aktiviert, werden die Kriterien automatisch gelockert, um die Mindestperiodenanzahl zu erreichen. Wenn deaktiviert, werden nur Perioden gemeldet, die die strengen Kriterien erfüllen (möglicherweise null Perioden bei stabilen Preisen).",
|
||||||
|
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestanzahl erreichen' aus dem Optionen-Dialog für die Bestpreis-Periodenberechnung.",
|
||||||
|
"usage_tips": "Aktiviere dies für garantierte tägliche Automatisierungsmöglichkeiten. Deaktiviere es, wenn du nur wirklich günstige Zeiträume willst, auch wenn das bedeutet, dass an manchen Tagen keine Perioden gefunden werden."
|
||||||
|
},
|
||||||
|
"peak_price_enable_relaxation_override": {
|
||||||
|
"description": "Wenn aktiviert, werden die Kriterien automatisch gelockert, um die Mindestperiodenanzahl zu erreichen. Wenn deaktiviert, werden nur echte Preisspitzen gemeldet.",
|
||||||
|
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestanzahl erreichen' aus dem Optionen-Dialog für die Spitzenpreis-Periodenberechnung.",
|
||||||
|
"usage_tips": "Aktiviere dies für konsistente Spitzenpreis-Warnungen. Deaktiviere es, um nur extreme Preisspitzen zu erfassen."
|
||||||
|
}
|
||||||
|
},
|
||||||
"home_types": {
|
"home_types": {
|
||||||
"APARTMENT": "Wohnung",
|
"APARTMENT": "Wohnung",
|
||||||
"ROWHOUSE": "Reihenhaus",
|
"ROWHOUSE": "Reihenhaus",
|
||||||
|
|
|
||||||
|
|
@ -2,7 +2,9 @@
|
||||||
"apexcharts": {
|
"apexcharts": {
|
||||||
"title_rating_level": "Price Phases Daily Progress",
|
"title_rating_level": "Price Phases Daily Progress",
|
||||||
"title_level": "Price Level",
|
"title_level": "Price Level",
|
||||||
|
"hourly_suffix": "(Ø hourly)",
|
||||||
"best_price_period_name": "Best Price Period",
|
"best_price_period_name": "Best Price Period",
|
||||||
|
"peak_price_period_name": "Peak Price Period",
|
||||||
"notification": {
|
"notification": {
|
||||||
"metadata_sensor_unavailable": {
|
"metadata_sensor_unavailable": {
|
||||||
"title": "Tibber Prices: ApexCharts YAML Generated with Limited Functionality",
|
"title": "Tibber Prices: ApexCharts YAML Generated with Limited Functionality",
|
||||||
|
|
@ -290,24 +292,24 @@
|
||||||
"long_description": "Shows the timestamp of the latest available price data interval from your Tibber subscription"
|
"long_description": "Shows the timestamp of the latest available price data interval from your Tibber subscription"
|
||||||
},
|
},
|
||||||
"today_volatility": {
|
"today_volatility": {
|
||||||
"description": "Price volatility classification for today",
|
"description": "How much electricity prices change throughout today",
|
||||||
"long_description": "Shows how much electricity prices vary throughout today based on the spread (difference between highest and lowest price). Classification: low = spread < 5ct, moderate = 5-15ct, high = 15-30ct, very high = >30ct.",
|
"long_description": "Indicates whether today's prices are stable or have big swings. Low volatility means prices stay fairly consistent—timing doesn't matter much. High volatility means significant price differences throughout the day—great opportunity to shift consumption to cheaper periods. Check `price_coefficient_variation_%` for the variance percentage and `price_spread` for the absolute price span.",
|
||||||
"usage_tips": "Use this to decide if price-based optimization is worthwhile. For example, with a balcony battery that has 15% efficiency losses, optimization only makes sense when volatility is at least moderate. Create automations that check volatility before scheduling charging/discharging cycles."
|
"usage_tips": "Use this to decide if optimization is worth your effort. On low-volatility days, you can run devices anytime. On high-volatility days, following Best Price periods saves meaningful money."
|
||||||
},
|
},
|
||||||
"tomorrow_volatility": {
|
"tomorrow_volatility": {
|
||||||
"description": "Price volatility classification for tomorrow",
|
"description": "How much electricity prices will change tomorrow",
|
||||||
"long_description": "Shows how much electricity prices will vary throughout tomorrow based on the spread (difference between highest and lowest price). Becomes unavailable until tomorrow's data is published (typically 13:00-14:00 CET).",
|
"long_description": "Indicates whether tomorrow's prices will be stable or have big swings. Available once tomorrow's data is published (typically 13:00-14:00 CET). Low volatility means prices stay fairly consistent—timing isn't critical. High volatility means significant price differences throughout the day—good opportunity for scheduling energy-intensive activities. Check `price_coefficient_variation_%` for the variance percentage and `price_spread` for the absolute price span.",
|
||||||
"usage_tips": "Use this for advance planning of tomorrow's energy usage. If tomorrow has high or very high volatility, it's worth optimizing energy consumption timing. If low, you can run devices anytime without significant cost differences."
|
"usage_tips": "Use for planning tomorrow's energy consumption. High volatility? Schedule flexible loads during Best Price periods. Low volatility? Run devices whenever is convenient."
|
||||||
},
|
},
|
||||||
"next_24h_volatility": {
|
"next_24h_volatility": {
|
||||||
"description": "Price volatility classification for the rolling next 24 hours",
|
"description": "How much prices will change over the next 24 hours",
|
||||||
"long_description": "Shows how much electricity prices vary in the next 24 hours from now (rolling window). This crosses day boundaries and updates every 15 minutes, providing a forward-looking volatility assessment independent of calendar days.",
|
"long_description": "Indicates price volatility for a rolling 24-hour window from now (updates every 15 minutes). Low volatility means prices stay fairly consistent. High volatility means significant price swings offer optimization opportunities. Unlike today/tomorrow sensors, this crosses day boundaries and provides a continuous forward-looking assessment. Check `price_coefficient_variation_%` for the variance percentage and `price_spread` for the absolute price span.",
|
||||||
"usage_tips": "Best sensor for real-time optimization decisions. Unlike today/tomorrow sensors that switch at midnight, this provides continuous 24h volatility assessment. Use for battery charging strategies that span across day boundaries."
|
"usage_tips": "Best for real-time decisions. Use when planning battery charging strategies or other flexible loads that might span across midnight. Provides consistent 24h perspective regardless of calendar day."
|
||||||
},
|
},
|
||||||
"today_tomorrow_volatility": {
|
"today_tomorrow_volatility": {
|
||||||
"description": "Combined price volatility classification for today and tomorrow",
|
"description": "Combined price volatility across today and tomorrow",
|
||||||
"long_description": "Shows volatility across both today and tomorrow combined (when tomorrow's data is available). Provides an extended view of price variation spanning up to 48 hours. Falls back to today-only when tomorrow's data isn't available yet.",
|
"long_description": "Shows overall price volatility when considering both today and tomorrow together (when available). Indicates whether there are significant price differences across the day boundary. Falls back to today-only when tomorrow's data isn't available yet. Useful for understanding multi-day optimization opportunities. Check `price_coefficient_variation_%` for the variance percentage and `price_spread` for the absolute price span.",
|
||||||
"usage_tips": "Use this for multi-day planning and to understand if price opportunities exist across the day boundary. The 'today_volatility' and 'tomorrow_volatility' breakdown attributes show individual day contributions. Useful for scheduling charging sessions that might span midnight."
|
"usage_tips": "Use for planning tasks that span multiple days. Check if prices vary enough to make scheduling worthwhile. The individual day volatility sensors show breakdown per day if you need more detail."
|
||||||
},
|
},
|
||||||
"data_lifecycle_status": {
|
"data_lifecycle_status": {
|
||||||
"description": "Current state of price data lifecycle and caching",
|
"description": "Current state of price data lifecycle and caching",
|
||||||
|
|
@ -320,14 +322,14 @@
|
||||||
"usage_tips": "Use this to display a countdown like 'Cheap period ends in 2 hours' (when active) or 'Next cheap period ends at 14:00' (when inactive). Home Assistant automatically shows relative time for timestamp sensors."
|
"usage_tips": "Use this to display a countdown like 'Cheap period ends in 2 hours' (when active) or 'Next cheap period ends at 14:00' (when inactive). Home Assistant automatically shows relative time for timestamp sensors."
|
||||||
},
|
},
|
||||||
"best_price_period_duration": {
|
"best_price_period_duration": {
|
||||||
"description": "Total length of current or next best price period in minutes",
|
"description": "Total length of current or next best price period",
|
||||||
"long_description": "Shows how long the best price period lasts in total. During an active period, shows the duration of the current period. When no period is active, shows the duration of the next upcoming period. Returns 'Unknown' only when no periods are configured.",
|
"long_description": "Shows how long the best price period lasts in total. The state is displayed in hours (e.g., 1.5 h) for easy reading in the UI, while the `period_duration_minutes` attribute provides the same value in minutes (e.g., 90) for use in automations. During an active period, shows the duration of the current period. When no period is active, shows the duration of the next upcoming period. Returns 'Unknown' only when no periods are configured.",
|
||||||
"usage_tips": "Useful for planning: 'The next cheap period lasts 90 minutes' or 'Current cheap period is 120 minutes long'. Combine with remaining_minutes to calculate when to start long-running appliances."
|
"usage_tips": "For display: Use the state value (hours) in dashboards. For automations: Use `period_duration_minutes` attribute to check if there's enough time for long-running tasks (e.g., 'If period_duration_minutes >= 90, start washing machine')."
|
||||||
},
|
},
|
||||||
"best_price_remaining_minutes": {
|
"best_price_remaining_minutes": {
|
||||||
"description": "Minutes remaining in current best price period (0 when inactive)",
|
"description": "Time remaining in current best price period",
|
||||||
"long_description": "Shows how many minutes are left in the current best price period. Returns 0 when no period is active. Updates every minute. Check binary_sensor.best_price_period to see if a period is currently active.",
|
"long_description": "Shows how much time is left in the current best price period. The state displays in hours (e.g., 0.5 h) for easy reading, while the `remaining_minutes` attribute provides minutes (e.g., 30) for automation logic. Returns 0 when no period is active. Updates every minute. Check binary_sensor.best_price_period to see if a period is currently active.",
|
||||||
"usage_tips": "Perfect for automations: 'If remaining_minutes > 0 AND remaining_minutes < 30, start washing machine now'. The value 0 makes it easy to check if a period is active (value > 0) or not (value = 0)."
|
"usage_tips": "For automations: Use `remaining_minutes` attribute with numeric comparisons like 'If remaining_minutes > 0 AND remaining_minutes < 30, start washing machine now'. The value 0 makes it easy to check if a period is active (value > 0) or not (value = 0)."
|
||||||
},
|
},
|
||||||
"best_price_progress": {
|
"best_price_progress": {
|
||||||
"description": "Progress through current best price period (0% when inactive)",
|
"description": "Progress through current best price period (0% when inactive)",
|
||||||
|
|
@ -340,9 +342,9 @@
|
||||||
"usage_tips": "Always useful for planning ahead: 'Next cheap period starts in 3 hours' (whether you're in a period now or not). Combine with automations: 'When next start time is in 10 minutes, send notification to prepare washing machine'."
|
"usage_tips": "Always useful for planning ahead: 'Next cheap period starts in 3 hours' (whether you're in a period now or not). Combine with automations: 'When next start time is in 10 minutes, send notification to prepare washing machine'."
|
||||||
},
|
},
|
||||||
"best_price_next_in_minutes": {
|
"best_price_next_in_minutes": {
|
||||||
"description": "Minutes until next best price period starts (0 when in transition)",
|
"description": "Time until next best price period starts",
|
||||||
"long_description": "Shows minutes until the next best price period starts. During an active period, shows time until the period AFTER the current one. Returns 0 during brief transition moments. Updates every minute.",
|
"long_description": "Shows how long until the next best price period starts. The state displays in hours (e.g., 2.25 h) for dashboards, while the `next_in_minutes` attribute provides minutes (e.g., 135) for automation conditions. During an active period, shows time until the period AFTER the current one. Returns 0 during brief transition moments. Updates every minute.",
|
||||||
"usage_tips": "Perfect for 'wait until cheap period' automations: 'If next_in_minutes > 0 AND next_in_minutes < 15, wait before starting dishwasher'. Value > 0 always indicates a future period is scheduled."
|
"usage_tips": "For automations: Use `next_in_minutes` attribute like 'If next_in_minutes > 0 AND next_in_minutes < 15, wait before starting dishwasher'. Value > 0 always indicates a future period is scheduled."
|
||||||
},
|
},
|
||||||
"peak_price_end_time": {
|
"peak_price_end_time": {
|
||||||
"description": "When the current or next peak price period ends",
|
"description": "When the current or next peak price period ends",
|
||||||
|
|
@ -350,14 +352,14 @@
|
||||||
"usage_tips": "Use this to display 'Expensive period ends in 1 hour' (when active) or 'Next expensive period ends at 18:00' (when inactive). Combine with automations to resume operations after peak."
|
"usage_tips": "Use this to display 'Expensive period ends in 1 hour' (when active) or 'Next expensive period ends at 18:00' (when inactive). Combine with automations to resume operations after peak."
|
||||||
},
|
},
|
||||||
"peak_price_period_duration": {
|
"peak_price_period_duration": {
|
||||||
"description": "Total length of current or next peak price period in minutes",
|
"description": "Total length of current or next peak price period",
|
||||||
"long_description": "Shows how long the peak price period lasts in total. During an active period, shows the duration of the current period. When no period is active, shows the duration of the next upcoming period. Returns 'Unknown' only when no periods are configured.",
|
"long_description": "Shows how long the peak price period lasts in total. The state is displayed in hours (e.g., 0.75 h) for easy reading in the UI, while the `period_duration_minutes` attribute provides the same value in minutes (e.g., 45) for use in automations. During an active period, shows the duration of the current period. When no period is active, shows the duration of the next upcoming period. Returns 'Unknown' only when no periods are configured.",
|
||||||
"usage_tips": "Useful for planning: 'The next expensive period lasts 60 minutes' or 'Current peak is 90 minutes long'. Combine with remaining_minutes to decide whether to wait out the peak or proceed with operations."
|
"usage_tips": "For display: Use the state value (hours) in dashboards. For automations: Use `period_duration_minutes` attribute to decide whether to wait out the peak or proceed (e.g., 'If period_duration_minutes <= 60, pause operations')."
|
||||||
},
|
},
|
||||||
"peak_price_remaining_minutes": {
|
"peak_price_remaining_minutes": {
|
||||||
"description": "Minutes remaining in current peak price period (0 when inactive)",
|
"description": "Time remaining in current peak price period",
|
||||||
"long_description": "Shows how many minutes are left in the current peak price period. Returns 0 when no period is active. Updates every minute. Check binary_sensor.peak_price_period to see if a period is currently active.",
|
"long_description": "Shows how much time is left in the current peak price period. The state displays in hours (e.g., 1.0 h) for easy reading, while the `remaining_minutes` attribute provides minutes (e.g., 60) for automation logic. Returns 0 when no period is active. Updates every minute. Check binary_sensor.peak_price_period to see if a period is currently active.",
|
||||||
"usage_tips": "Use in automations: 'If remaining_minutes > 60, cancel deferred charging session'. Value 0 makes it easy to distinguish active (value > 0) from inactive (value = 0) periods."
|
"usage_tips": "For automations: Use `remaining_minutes` attribute like 'If remaining_minutes > 60, cancel deferred charging session'. Value 0 makes it easy to distinguish active (value > 0) from inactive (value = 0) periods."
|
||||||
},
|
},
|
||||||
"peak_price_progress": {
|
"peak_price_progress": {
|
||||||
"description": "Progress through current peak price period (0% when inactive)",
|
"description": "Progress through current peak price period (0% when inactive)",
|
||||||
|
|
@ -370,9 +372,9 @@
|
||||||
"usage_tips": "Always useful for planning: 'Next expensive period starts in 2 hours'. Automation: 'When next start time is in 30 minutes, reduce heating temperature preemptively'."
|
"usage_tips": "Always useful for planning: 'Next expensive period starts in 2 hours'. Automation: 'When next start time is in 30 minutes, reduce heating temperature preemptively'."
|
||||||
},
|
},
|
||||||
"peak_price_next_in_minutes": {
|
"peak_price_next_in_minutes": {
|
||||||
"description": "Minutes until next peak price period starts (0 when in transition)",
|
"description": "Time until next peak price period starts",
|
||||||
"long_description": "Shows minutes until the next peak price period starts. During an active period, shows time until the period AFTER the current one. Returns 0 during brief transition moments. Updates every minute.",
|
"long_description": "Shows how long until the next peak price period starts. The state displays in hours (e.g., 0.5 h) for dashboards, while the `next_in_minutes` attribute provides minutes (e.g., 30) for automation conditions. During an active period, shows time until the period AFTER the current one. Returns 0 during brief transition moments. Updates every minute.",
|
||||||
"usage_tips": "Pre-emptive automation: 'If next_in_minutes > 0 AND next_in_minutes < 10, complete current charging cycle now before prices increase'."
|
"usage_tips": "For automations: Use `next_in_minutes` attribute like 'If next_in_minutes > 0 AND next_in_minutes < 10, complete current charging cycle now before prices increase'."
|
||||||
},
|
},
|
||||||
"home_type": {
|
"home_type": {
|
||||||
"description": "Type of home (apartment, house, etc.)",
|
"description": "Type of home (apartment, house, etc.)",
|
||||||
|
|
@ -487,6 +489,80 @@
|
||||||
"usage_tips": "Use this to verify that realtime consumption data is available. Enable notifications if this changes to 'off' unexpectedly, indicating potential hardware or connectivity issues."
|
"usage_tips": "Use this to verify that realtime consumption data is available. Enable notifications if this changes to 'off' unexpectedly, indicating potential hardware or connectivity issues."
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"number": {
|
||||||
|
"best_price_flex_override": {
|
||||||
|
"description": "Maximum above the daily minimum price that intervals can be and still qualify as 'best price'. Recommended: 15-20 with relaxation enabled (default), or 25-35 without relaxation. Maximum: 50 (hard cap for reliable period detection).",
|
||||||
|
"long_description": "When this entity is enabled, its value overrides the 'Flexibility' setting from the options flow for best price period calculations.",
|
||||||
|
"usage_tips": "Enable this entity to dynamically adjust best price detection via automations. Higher values create longer periods, lower values are stricter."
|
||||||
|
},
|
||||||
|
"best_price_min_distance_override": {
|
||||||
|
"description": "Ensures periods are significantly cheaper than the daily average, not just marginally below it. This filters out noise and prevents marking slightly-below-average periods as 'best price' on days with flat prices. Higher values = stricter filtering (only truly cheap periods qualify).",
|
||||||
|
"long_description": "When this entity is enabled, its value overrides the 'Minimum Distance' setting from the options flow for best price period calculations.",
|
||||||
|
"usage_tips": "Use in automations to adjust how much better than average the best price periods must be. Higher values require prices to be further below average."
|
||||||
|
},
|
||||||
|
"best_price_min_period_length_override": {
|
||||||
|
"description": "Minimum duration for a period to be considered as 'best price'. Longer periods are more practical for running appliances like dishwashers or heat pumps. Best price periods require 60 minutes minimum (vs. 30 minutes for peak price warnings) because they should provide meaningful time windows for consumption planning.",
|
||||||
|
"long_description": "When this entity is enabled, its value overrides the 'Minimum Period Length' setting from the options flow for best price period calculations.",
|
||||||
|
"usage_tips": "Increase when your appliances need longer uninterrupted run times (e.g., washing machines, dishwashers)."
|
||||||
|
},
|
||||||
|
"best_price_min_periods_override": {
|
||||||
|
"description": "Minimum number of best price periods to aim for per day. Filters will be relaxed step-by-step to try achieving this count. Only active when 'Achieve Minimum Count' is enabled.",
|
||||||
|
"long_description": "When this entity is enabled, its value overrides the 'Minimum Periods' setting from the options flow for best price period calculations.",
|
||||||
|
"usage_tips": "Adjust dynamically based on how many times per day you need cheap electricity windows."
|
||||||
|
},
|
||||||
|
"best_price_relaxation_attempts_override": {
|
||||||
|
"description": "How many flex levels (attempts) to try before giving up. Each attempt runs all filter combinations at the new flex level. More attempts increase the chance of finding additional periods at the cost of longer processing time.",
|
||||||
|
"long_description": "When this entity is enabled, its value overrides the 'Relaxation Attempts' setting from the options flow for best price period calculations.",
|
||||||
|
"usage_tips": "Increase when periods are hard to find. Decrease for stricter price filtering."
|
||||||
|
},
|
||||||
|
"best_price_gap_count_override": {
|
||||||
|
"description": "Maximum number of consecutive intervals allowed that deviate by exactly one level step from the required level. This prevents periods from being split by occasional level deviations. Gap tolerance requires periods ≥90 minutes (6 intervals) to detect outliers effectively.",
|
||||||
|
"long_description": "When this entity is enabled, its value overrides the 'Gap Tolerance' setting from the options flow for best price period calculations.",
|
||||||
|
"usage_tips": "Increase to allow longer periods with occasional price spikes. Keep low for stricter continuous cheap periods."
|
||||||
|
},
|
||||||
|
"peak_price_flex_override": {
|
||||||
|
"description": "Maximum below the daily maximum price that intervals can be and still qualify as 'peak price'. Recommended: -15 to -20 with relaxation enabled (default), or -25 to -35 without relaxation. Maximum: -50 (hard cap for reliable period detection). Note: Negative values indicate distance below maximum.",
|
||||||
|
"long_description": "When this entity is enabled, its value overrides the 'Flexibility' setting from the options flow for peak price period calculations.",
|
||||||
|
"usage_tips": "Enable this entity to dynamically adjust peak price detection via automations. Higher values create longer peak periods."
|
||||||
|
},
|
||||||
|
"peak_price_min_distance_override": {
|
||||||
|
"description": "Ensures periods are significantly more expensive than the daily average, not just marginally above it. This filters out noise and prevents marking slightly-above-average periods as 'peak price' on days with flat prices. Higher values = stricter filtering (only truly expensive periods qualify).",
|
||||||
|
"long_description": "When this entity is enabled, its value overrides the 'Minimum Distance' setting from the options flow for peak price period calculations.",
|
||||||
|
"usage_tips": "Use in automations to adjust how much higher than average the peak price periods must be."
|
||||||
|
},
|
||||||
|
"peak_price_min_period_length_override": {
|
||||||
|
"description": "Minimum duration for a period to be considered as 'peak price'. Peak price warnings are allowed for shorter periods (30 minutes minimum vs. 60 minutes for best price) because brief expensive spikes are worth alerting about, even if they're too short for consumption planning.",
|
||||||
|
"long_description": "When this entity is enabled, its value overrides the 'Minimum Period Length' setting from the options flow for peak price period calculations.",
|
||||||
|
"usage_tips": "Increase to filter out brief price spikes, focusing on sustained expensive periods."
|
||||||
|
},
|
||||||
|
"peak_price_min_periods_override": {
|
||||||
|
"description": "Minimum number of peak price periods to aim for per day. Filters will be relaxed step-by-step to try achieving this count. Only active when 'Achieve Minimum Count' is enabled.",
|
||||||
|
"long_description": "When this entity is enabled, its value overrides the 'Minimum Periods' setting from the options flow for peak price period calculations.",
|
||||||
|
"usage_tips": "Adjust based on how many peak periods you want to identify and avoid."
|
||||||
|
},
|
||||||
|
"peak_price_relaxation_attempts_override": {
|
||||||
|
"description": "How many flex levels (attempts) to try before giving up. Each attempt runs all filter combinations at the new flex level. More attempts increase the chance of finding additional peak periods at the cost of longer processing time.",
|
||||||
|
"long_description": "When this entity is enabled, its value overrides the 'Relaxation Attempts' setting from the options flow for peak price period calculations.",
|
||||||
|
"usage_tips": "Increase when peak periods are hard to detect. Decrease for stricter peak price filtering."
|
||||||
|
},
|
||||||
|
"peak_price_gap_count_override": {
|
||||||
|
"description": "Maximum number of consecutive intervals allowed that deviate by exactly one level step from the required level. This prevents periods from being split by occasional level deviations. Gap tolerance requires periods ≥90 minutes (6 intervals) to detect outliers effectively.",
|
||||||
|
"long_description": "When this entity is enabled, its value overrides the 'Gap Tolerance' setting from the options flow for peak price period calculations.",
|
||||||
|
"usage_tips": "Increase to identify sustained expensive periods with brief dips. Keep low for stricter continuous peak detection."
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"switch": {
|
||||||
|
"best_price_enable_relaxation_override": {
|
||||||
|
"description": "When enabled, filters will be gradually relaxed if not enough periods are found. This attempts to reach the desired minimum number of periods, which may include less optimal time windows as best-price periods.",
|
||||||
|
"long_description": "When this entity is enabled, its value overrides the 'Achieve Minimum Count' setting from the options flow for best price period calculations.",
|
||||||
|
"usage_tips": "Turn OFF to disable relaxation and use strict filtering only. Turn ON to allow the algorithm to relax criteria to find more periods."
|
||||||
|
},
|
||||||
|
"peak_price_enable_relaxation_override": {
|
||||||
|
"description": "When enabled, filters will be gradually relaxed if not enough periods are found. This attempts to reach the desired minimum number of periods to ensure you're warned about expensive periods even on days with unusual price patterns.",
|
||||||
|
"long_description": "When this entity is enabled, its value overrides the 'Achieve Minimum Count' setting from the options flow for peak price period calculations.",
|
||||||
|
"usage_tips": "Turn OFF to disable relaxation and use strict filtering only. Turn ON to allow the algorithm to relax criteria to find more peak periods."
|
||||||
|
}
|
||||||
|
},
|
||||||
"home_types": {
|
"home_types": {
|
||||||
"APARTMENT": "Apartment",
|
"APARTMENT": "Apartment",
|
||||||
"ROWHOUSE": "Rowhouse",
|
"ROWHOUSE": "Rowhouse",
|
||||||
|
|
|
||||||
|
|
@ -2,7 +2,9 @@
|
||||||
"apexcharts": {
|
"apexcharts": {
|
||||||
"title_rating_level": "Prisfaser dagsfremdrift",
|
"title_rating_level": "Prisfaser dagsfremdrift",
|
||||||
"title_level": "Prisnivå",
|
"title_level": "Prisnivå",
|
||||||
|
"hourly_suffix": "(Ø per time)",
|
||||||
"best_price_period_name": "Beste prisperiode",
|
"best_price_period_name": "Beste prisperiode",
|
||||||
|
"peak_price_period_name": "Toppprisperiode",
|
||||||
"notification": {
|
"notification": {
|
||||||
"metadata_sensor_unavailable": {
|
"metadata_sensor_unavailable": {
|
||||||
"title": "Tibber Prices: ApexCharts YAML generert med begrenset funksjonalitet",
|
"title": "Tibber Prices: ApexCharts YAML generert med begrenset funksjonalitet",
|
||||||
|
|
@ -290,24 +292,24 @@
|
||||||
"long_description": "Viser tidsstempelet for siste tilgjengelige prisdataintervall fra ditt Tibber-abonnement"
|
"long_description": "Viser tidsstempelet for siste tilgjengelige prisdataintervall fra ditt Tibber-abonnement"
|
||||||
},
|
},
|
||||||
"today_volatility": {
|
"today_volatility": {
|
||||||
"description": "Prisvolatilitetsklassifisering for i dag",
|
"description": "Hvor mye strømprisene endrer seg i dag",
|
||||||
"long_description": "Viser hvor mye strømprisene varierer gjennom dagen basert på spredningen (forskjellen mellom høyeste og laveste pris). Klassifisering: lav = spredning < 5øre, moderat = 5-15øre, høy = 15-30øre, veldig høy = >30øre.",
|
"long_description": "Viser om dagens priser er stabile eller har store svingninger. Lav volatilitet betyr ganske jevne priser – timing betyr lite. Høy volatilitet betyr tydelige prisforskjeller gjennom dagen – en god sjanse til å flytte forbruk til billigere perioder. `price_coefficient_variation_%` viser prosentverdien, `price_spread` viser den absolutte prisspennet.",
|
||||||
"usage_tips": "Bruk dette til å bestemme om prisbasert optimalisering er verdt det. For eksempel, med et balkongbatteri som har 15% effektivitetstap, er optimalisering kun meningsfull når volatiliteten er minst moderat. Opprett automatiseringer som sjekker volatilitet før planlegging av lade-/utladingssykluser."
|
"usage_tips": "Bruk dette for å avgjøre om optimalisering er verdt innsatsen. Ved lav volatilitet kan du kjøre enheter når som helst. Ved høy volatilitet sparer du merkbart ved å følge Best Price-perioder."
|
||||||
},
|
},
|
||||||
"tomorrow_volatility": {
|
"tomorrow_volatility": {
|
||||||
"description": "Prisvolatilitetsklassifisering for i morgen",
|
"description": "Hvor mye strømprisene vil endre seg i morgen",
|
||||||
"long_description": "Viser hvor mye strømprisene vil variere gjennom morgendagen basert på spredningen (forskjellen mellom høyeste og laveste pris). Blir utilgjengelig til morgendagens data er publisert (typisk 13:00-14:00 CET).",
|
"long_description": "Viser om prisene i morgen blir stabile eller får store svingninger. Tilgjengelig når morgendagens data er publisert (vanligvis 13:00–14:00 CET). Lav volatilitet betyr jevne priser – timing er ikke kritisk. Høy volatilitet betyr tydelige prisforskjeller gjennom dagen – en god mulighet til å planlegge energikrevende oppgaver. `price_coefficient_variation_%` viser prosentverdien, `price_spread` viser den absolutte prisspennet.",
|
||||||
"usage_tips": "Bruk dette til forhåndsplanlegging av morgendagens energiforbruk. Hvis morgendagen har høy eller veldig høy volatilitet, er det verdt å optimalisere tidspunktet for energiforbruk. Hvis lav, kan du kjøre enheter når som helst uten betydelige kostnadsforskjeller."
|
"usage_tips": "Bruk dette til å planlegge morgendagens forbruk. Høy volatilitet? Planlegg fleksible laster i Best Price-perioder. Lav volatilitet? Kjør enheter når det passer deg."
|
||||||
},
|
},
|
||||||
"next_24h_volatility": {
|
"next_24h_volatility": {
|
||||||
"description": "Prisvolatilitetsklassifisering for de rullerende neste 24 timene",
|
"description": "Hvor mye prisene endrer seg de neste 24 timene",
|
||||||
"long_description": "Viser hvor mye strømprisene varierer i de neste 24 timene fra nå (rullerende vindu). Dette krysser daggrenser og oppdateres hvert 15. minutt, og gir en fremoverskuende volatilitetsvurdering uavhengig av kalenderdager.",
|
"long_description": "Viser prisvolatilitet for et rullerende 24-timers vindu fra nå (oppdateres hvert 15. minutt). Lav volatilitet betyr jevne priser. Høy volatilitet betyr merkbare prissvingninger og mulighet for optimalisering. I motsetning til i dag/i morgen-sensorer krysser denne daggrenser og gir en kontinuerlig fremoverskuende vurdering. `price_coefficient_variation_%` viser prosentverdien, `price_spread` viser den absolutte prisspennet.",
|
||||||
"usage_tips": "Beste sensor for sanntids optimaliseringsbeslutninger. I motsetning til dagens/morgendagens sensorer som bytter ved midnatt, gir denne kontinuerlig 24t volatilitetsvurdering. Bruk til batteriladingsstrategier som spenner over daggrenser."
|
"usage_tips": "Best for beslutninger i sanntid. Bruk når du planlegger batterilading eller andre fleksible laster som kan gå over midnatt. Gir et konsistent 24t-bilde uavhengig av kalenderdag."
|
||||||
},
|
},
|
||||||
"today_tomorrow_volatility": {
|
"today_tomorrow_volatility": {
|
||||||
"description": "Kombinert prisvolatilitetsklassifisering for i dag og i morgen",
|
"description": "Kombinert prisvolatilitet for i dag og i morgen",
|
||||||
"long_description": "Viser volatilitet på tvers av både i dag og i morgen kombinert (når morgendagens data er tilgjengelig). Gir en utvidet visning av prisvariasjoner som spenner over opptil 48 timer. Faller tilbake til bare i dag når morgendagens data ikke er tilgjengelig ennå.",
|
"long_description": "Viser samlet volatilitet når i dag og i morgen sees sammen (når morgendata er tilgjengelig). Viser om det finnes klare prisforskjeller over dagsgrensen. Faller tilbake til kun i dag hvis morgendata mangler. Nyttig for flerdagers optimalisering. `price_coefficient_variation_%` viser prosentverdien, `price_spread` viser den absolutte prisspennet.",
|
||||||
"usage_tips": "Bruk dette for flersdagers planlegging og for å forstå om prismuligheter eksisterer på tvers av dags grensen. Attributtene 'today_volatility' og 'tomorrow_volatility' viser individuelle dagbidrag. Nyttig for planlegging av ladeøkter som kan strekke seg over midnatt."
|
"usage_tips": "Bruk for oppgaver som går over flere dager. Sjekk om prisforskjellene er store nok til å planlegge etter. De enkelte dagssensorene viser bidrag per dag om du trenger mer detalj."
|
||||||
},
|
},
|
||||||
"data_lifecycle_status": {
|
"data_lifecycle_status": {
|
||||||
"description": "Gjeldende tilstand for prisdatalivssyklus og hurtigbufring",
|
"description": "Gjeldende tilstand for prisdatalivssyklus og hurtigbufring",
|
||||||
|
|
@ -315,39 +317,49 @@
|
||||||
"usage_tips": "Bruk denne diagnosesensoren for å forstå dataferskhet og API-anropsmønstre. Sjekk 'cache_age'-attributtet for å se hvor gamle de nåværende dataene er. Overvåk 'next_api_poll' for å vite når neste oppdatering er planlagt. Bruk 'data_completeness' for å se om data for i går/i dag/i morgen er tilgjengelig. 'api_calls_today'-telleren hjelper med å spore API-bruk. Perfekt for feilsøking eller forståelse av integrasjonens oppførsel."
|
"usage_tips": "Bruk denne diagnosesensoren for å forstå dataferskhet og API-anropsmønstre. Sjekk 'cache_age'-attributtet for å se hvor gamle de nåværende dataene er. Overvåk 'next_api_poll' for å vite når neste oppdatering er planlagt. Bruk 'data_completeness' for å se om data for i går/i dag/i morgen er tilgjengelig. 'api_calls_today'-telleren hjelper med å spore API-bruk. Perfekt for feilsøking eller forståelse av integrasjonens oppførsel."
|
||||||
},
|
},
|
||||||
"best_price_end_time": {
|
"best_price_end_time": {
|
||||||
"description": "Når gjeldende eller neste billigperiode slutter",
|
"description": "Total lengde på nåværende eller neste billigperiode (state i timer, attributt i minutter)",
|
||||||
"long_description": "Viser sluttidspunktet for gjeldende billigperiode når aktiv, eller slutten av neste periode når ingen periode er aktiv. Viser alltid en nyttig tidsreferanse for planlegging. Returnerer 'Ukjent' bare når ingen perioder er konfigurert.",
|
"long_description": "Viser hvor lenge billigperioden varer. State bruker timer (desimal) for lesbar UI; attributtet `period_duration_minutes` beholder avrundede minutter for automasjoner. Aktiv → varighet for gjeldende periode, ellers neste.",
|
||||||
"usage_tips": "Bruk dette til å vise en nedtelling som 'Billigperiode slutter om 2 timer' (når aktiv) eller 'Neste billigperiode slutter kl 14:00' (når inaktiv). Home Assistant viser automatisk relativ tid for tidsstempelsensorer."
|
"usage_tips": "UI kan vise 1,5 t mens `period_duration_minutes` = 90 for automasjoner."
|
||||||
|
},
|
||||||
|
"best_price_period_duration": {
|
||||||
|
"description": "Lengde på gjeldende/neste billigperiode",
|
||||||
|
"long_description": "Total varighet av gjeldende eller neste billigperiode. State vises i timer (f.eks. 1,5 t) for enkel lesing i UI, mens attributtet `period_duration_minutes` gir samme verdi i minutter (f.eks. 90) for automasjoner. Denne verdien representerer den **fulle planlagte varigheten** av perioden og er konstant gjennom hele perioden, selv om gjenværende tid (remaining_minutes) reduseres.",
|
||||||
|
"usage_tips": "Kombiner med remaining_minutes for å beregne når langvarige enheter skal stoppes: Perioden startet for `period_duration_minutes - remaining_minutes` minutter siden. Dette attributtet støtter energioptimeringsstrategier ved å hjelpe til med å planlegge høyforbruksaktiviteter innenfor billige perioder."
|
||||||
},
|
},
|
||||||
"best_price_remaining_minutes": {
|
"best_price_remaining_minutes": {
|
||||||
"description": "Gjenværende minutter i gjeldende billigperiode (0 når inaktiv)",
|
"description": "Gjenværende tid i gjeldende billigperiode",
|
||||||
"long_description": "Viser hvor mange minutter som er igjen i gjeldende billigperiode. Returnerer 0 når ingen periode er aktiv. Oppdateres hvert minutt. Sjekk binary_sensor.best_price_period for å se om en periode er aktiv.",
|
"long_description": "Viser hvor mye tid som gjenstår i gjeldende billigperiode. State vises i timer (f.eks. 0,75 t) for enkel lesing i dashboards, mens attributtet `remaining_minutes` gir samme tid i minutter (f.eks. 45) for automasjonsbetingelser. **Nedtellingstimer**: Denne verdien reduseres hvert minutt under en aktiv periode. Returnerer 0 når ingen billigperiode er aktiv. Oppdateres hvert minutt.",
|
||||||
"usage_tips": "Perfekt for automatiseringer: 'Hvis remaining_minutes > 0 OG remaining_minutes < 30, start vaskemaskin nå'. Verdien 0 gjør det enkelt å sjekke om en periode er aktiv (verdi > 0) eller ikke (verdi = 0)."
|
"usage_tips": "For automasjoner: Bruk attributtet `remaining_minutes` som 'Hvis remaining_minutes > 60, start oppvaskmaskinen nå (nok tid til å fullføre)' eller 'Hvis remaining_minutes < 15, fullfør gjeldende syklus snart'. UI viser brukervennlige timer (f.eks. 1,25 t). Verdi 0 indikerer ingen aktiv billigperiode."
|
||||||
},
|
},
|
||||||
"best_price_progress": {
|
"best_price_progress": {
|
||||||
"description": "Fremdrift gjennom gjeldende billigperiode (0% når inaktiv)",
|
"description": "Fremdrift gjennom gjeldende billigperiode (0% når inaktiv)",
|
||||||
"long_description": "Viser fremdrift gjennom gjeldende billigperiode som 0-100%. Returnerer 0% når ingen periode er aktiv. Oppdateres hvert minutt. 0% betyr periode nettopp startet, 100% betyr den snart slutter.",
|
"long_description": "Viser fremdrift gjennom gjeldende billigperiode som 0-100%. Returnerer 0% når ingen periode er aktiv. Oppdateres hvert minutt. 0% betyr perioden nettopp startet, 100% betyr den slutter snart.",
|
||||||
"usage_tips": "Flott for visuelle fremdriftslinjer. Bruk i automatiseringer: 'Hvis progress > 0 OG progress > 75, send varsel om at billigperiode snart slutter'. Verdi 0 indikerer ingen aktiv periode."
|
"usage_tips": "Flott for visuelle fremgangsindikatorer. Bruk i automatiseringer: 'Hvis progress > 0 OG progress > 75, send varsel om at billigperioden snart slutter'. Verdi 0 indikerer ingen aktiv periode."
|
||||||
},
|
},
|
||||||
"best_price_next_start_time": {
|
"best_price_next_start_time": {
|
||||||
"description": "Når neste billigperiode starter",
|
"description": "Total lengde på nåværende eller neste dyr-periode (state i timer, attributt i minutter)",
|
||||||
"long_description": "Viser når neste kommende billigperiode starter. Under en aktiv periode viser dette starten av NESTE periode etter den gjeldende. Returnerer 'Ukjent' bare når ingen fremtidige perioder er konfigurert.",
|
"long_description": "Viser hvor lenge den dyre perioden varer. State bruker timer (desimal) for UI; attributtet `period_duration_minutes` beholder avrundede minutter for automasjoner. Aktiv → varighet for gjeldende periode, ellers neste.",
|
||||||
"usage_tips": "Alltid nyttig for planlegging: 'Neste billigperiode starter om 3 timer' (enten du er i en periode nå eller ikke). Kombiner med automatiseringer: 'Når neste starttid er om 10 minutter, send varsel for å forberede vaskemaskin'."
|
"usage_tips": "UI kan vise 0,75 t mens `period_duration_minutes` = 45 for automasjoner."
|
||||||
},
|
},
|
||||||
"best_price_next_in_minutes": {
|
"best_price_next_in_minutes": {
|
||||||
"description": "Minutter til neste billigperiode starter (0 ved overgang)",
|
"description": "Tid til neste billigperiode",
|
||||||
"long_description": "Viser minutter til neste billigperiode starter. Under en aktiv periode viser dette tiden til perioden ETTER den gjeldende. Returnerer 0 under korte overgangsmomenter. Oppdateres hvert minutt.",
|
"long_description": "Viser hvor lenge til neste billigperiode. State vises i timer (f.eks. 2,25 t) for dashboards, mens attributtet `next_in_minutes` gir minutter (f.eks. 135) for automasjonsbetingelser. Under en aktiv periode viser dette tiden til perioden ETTER den gjeldende. Returnerer 0 under korte overgangsmomenter. Oppdateres hvert minutt.",
|
||||||
"usage_tips": "Perfekt for 'vent til billigperiode' automatiseringer: 'Hvis next_in_minutes > 0 OG next_in_minutes < 15, vent før oppvaskmaskin startes'. Verdi > 0 indikerer alltid at en fremtidig periode er planlagt."
|
"usage_tips": "For automasjoner: Bruk attributtet `next_in_minutes` som 'Hvis next_in_minutes > 0 OG next_in_minutes < 15, vent før start av oppvaskmaskin'. Verdi > 0 indikerer alltid at en fremtidig periode er planlagt."
|
||||||
},
|
},
|
||||||
"peak_price_end_time": {
|
"peak_price_end_time": {
|
||||||
"description": "Når gjeldende eller neste dyrperiode slutter",
|
"description": "Tid til neste dyr-periode (state i timer, attributt i minutter)",
|
||||||
"long_description": "Viser sluttidspunktet for gjeldende dyrperiode når aktiv, eller slutten av neste periode når ingen periode er aktiv. Viser alltid en nyttig tidsreferanse for planlegging. Returnerer 'Ukjent' bare når ingen perioder er konfigurert.",
|
"long_description": "Viser hvor lenge til neste dyre periode starter. State bruker timer (desimal); attributtet `next_in_minutes` beholder avrundede minutter for automasjoner. Under aktiv periode viser dette tiden til perioden etter den nåværende. 0 i korte overgangsøyeblikk. Oppdateres hvert minutt.",
|
||||||
"usage_tips": "Bruk dette til å vise 'Dyrperiode slutter om 1 time' (når aktiv) eller 'Neste dyrperiode slutter kl 18:00' (når inaktiv). Kombiner med automatiseringer for å gjenoppta drift etter topp."
|
"usage_tips": "Bruk `next_in_minutes` i automasjoner (f.eks. < 10) mens state er lett å lese i timer."
|
||||||
|
},
|
||||||
|
"peak_price_period_duration": {
|
||||||
|
"description": "Lengde på gjeldende/neste dyr periode",
|
||||||
|
"long_description": "Total varighet av gjeldende eller neste dyre periode. State vises i timer (f.eks. 1,5 t) for enkel lesing i UI, mens attributtet `period_duration_minutes` gir samme verdi i minutter (f.eks. 90) for automasjoner. Denne verdien representerer den **fulle planlagte varigheten** av perioden og er konstant gjennom hele perioden, selv om gjenværende tid (remaining_minutes) reduseres.",
|
||||||
|
"usage_tips": "Kombiner med remaining_minutes for å beregne når langvarige enheter skal stoppes: Perioden startet for `period_duration_minutes - remaining_minutes` minutter siden. Dette attributtet støtter energisparingsstrategier ved å hjelpe til med å planlegge høyforbruksaktiviteter utenfor dyre perioder."
|
||||||
},
|
},
|
||||||
"peak_price_remaining_minutes": {
|
"peak_price_remaining_minutes": {
|
||||||
"description": "Gjenværende minutter i gjeldende dyrperiode (0 når inaktiv)",
|
"description": "Gjenværende tid i gjeldende dyre periode",
|
||||||
"long_description": "Viser hvor mange minutter som er igjen i gjeldende dyrperiode. Returnerer 0 når ingen periode er aktiv. Oppdateres hvert minutt. Sjekk binary_sensor.peak_price_period for å se om en periode er aktiv.",
|
"long_description": "Viser hvor mye tid som gjenstår i gjeldende dyre periode. State vises i timer (f.eks. 0,75 t) for enkel lesing i dashboards, mens attributtet `remaining_minutes` gir samme tid i minutter (f.eks. 45) for automasjonsbetingelser. **Nedtellingstimer**: Denne verdien reduseres hvert minutt under en aktiv periode. Returnerer 0 når ingen dyr periode er aktiv. Oppdateres hvert minutt.",
|
||||||
"usage_tips": "Bruk i automatiseringer: 'Hvis remaining_minutes > 60, avbryt utsatt ladeøkt'. Verdi 0 gjør det enkelt å skille mellom aktive (verdi > 0) og inaktive (verdi = 0) perioder."
|
"usage_tips": "For automasjoner: Bruk attributtet `remaining_minutes` som 'Hvis remaining_minutes > 60, avbryt utsatt ladeøkt' eller 'Hvis remaining_minutes < 15, fortsett normal drift snart'. UI viser brukervennlige timer (f.eks. 1,0 t). Verdi 0 indikerer ingen aktiv dyr periode."
|
||||||
},
|
},
|
||||||
"peak_price_progress": {
|
"peak_price_progress": {
|
||||||
"description": "Fremdrift gjennom gjeldende dyrperiode (0% når inaktiv)",
|
"description": "Fremdrift gjennom gjeldende dyrperiode (0% når inaktiv)",
|
||||||
|
|
@ -360,19 +372,9 @@
|
||||||
"usage_tips": "Alltid nyttig for planlegging: 'Neste dyrperiode starter om 2 timer'. Automatisering: 'Når neste starttid er om 30 minutter, reduser varmetemperatur forebyggende'."
|
"usage_tips": "Alltid nyttig for planlegging: 'Neste dyrperiode starter om 2 timer'. Automatisering: 'Når neste starttid er om 30 minutter, reduser varmetemperatur forebyggende'."
|
||||||
},
|
},
|
||||||
"peak_price_next_in_minutes": {
|
"peak_price_next_in_minutes": {
|
||||||
"description": "Minutter til neste dyrperiode starter (0 ved overgang)",
|
"description": "Tid til neste dyre periode",
|
||||||
"long_description": "Viser minutter til neste dyrperiode starter. Under en aktiv periode viser dette tiden til perioden ETTER den gjeldende. Returnerer 0 under korte overgangsmomenter. Oppdateres hvert minutt.",
|
"long_description": "Viser hvor lenge til neste dyre periode starter. State vises i timer (f.eks. 0,5 t) for dashboards, mens attributtet `next_in_minutes` gir minutter (f.eks. 30) for automasjonsbetingelser. Under en aktiv periode viser dette tiden til perioden ETTER den gjeldende. Returnerer 0 under korte overgangsmomenter. Oppdateres hvert minutt.",
|
||||||
"usage_tips": "Forebyggende automatisering: 'Hvis next_in_minutes > 0 OG next_in_minutes < 10, fullfør gjeldende ladesyklus nå før prisene øker'."
|
"usage_tips": "For automasjoner: Bruk attributtet `next_in_minutes` som 'Hvis next_in_minutes > 0 OG next_in_minutes < 10, fullfør gjeldende ladesyklus nå før prisene øker'. Verdi > 0 indikerer alltid at en fremtidig dyr periode er planlagt."
|
||||||
},
|
|
||||||
"best_price_period_duration": {
|
|
||||||
"description": "Total varighet av gjeldende eller neste billigperiode i minutter",
|
|
||||||
"long_description": "Viser den totale varigheten av billigperioden i minutter. Under en aktiv periode viser dette hele varigheten av gjeldende periode. Når ingen periode er aktiv, viser dette varigheten av neste kommende periode. Eksempel: '90 minutter' for en 1,5-timers periode.",
|
|
||||||
"usage_tips": "Kombiner med remaining_minutes for å planlegge oppgaver: 'Hvis duration = 120 OG remaining_minutes > 90, start vaskemaskin (nok tid til å fullføre)'. Nyttig for å forstå om perioder er lange nok for strømkrevende oppgaver."
|
|
||||||
},
|
|
||||||
"peak_price_period_duration": {
|
|
||||||
"description": "Total varighet av gjeldende eller neste dyrperiode i minutter",
|
|
||||||
"long_description": "Viser den totale varigheten av dyrperioden i minutter. Under en aktiv periode viser dette hele varigheten av gjeldende periode. Når ingen periode er aktiv, viser dette varigheten av neste kommende periode. Eksempel: '60 minutter' for en 1-times periode.",
|
|
||||||
"usage_tips": "Bruk til å planlegge energibesparelsestiltak: 'Hvis duration > 120, reduser varmetemperatur mer aggressivt (lang dyr periode)'. Hjelper med å vurdere hvor mye energiforbruk må reduseres."
|
|
||||||
},
|
},
|
||||||
"home_type": {
|
"home_type": {
|
||||||
"description": "Type bolig (leilighet, hus osv.)",
|
"description": "Type bolig (leilighet, hus osv.)",
|
||||||
|
|
@ -487,6 +489,80 @@
|
||||||
"usage_tips": "Bruk dette for å bekrefte at sanntidsforbruksdata er tilgjengelig. Aktiver varsler hvis dette endres til 'av' uventet, noe som indikerer potensielle maskinvare- eller tilkoblingsproblemer."
|
"usage_tips": "Bruk dette for å bekrefte at sanntidsforbruksdata er tilgjengelig. Aktiver varsler hvis dette endres til 'av' uventet, noe som indikerer potensielle maskinvare- eller tilkoblingsproblemer."
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"number": {
|
||||||
|
"best_price_flex_override": {
|
||||||
|
"description": "Maksimal prosent over daglig minimumspris som intervaller kan ha og fortsatt kvalifisere som 'beste pris'. Anbefalt: 15-20 med lemping aktivert (standard), eller 25-35 uten lemping. Maksimum: 50 (tak for pålitelig periodedeteksjon).",
|
||||||
|
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Fleksibilitet'-innstillingen fra alternativer-dialogen for beste pris-periodeberegninger.",
|
||||||
|
"usage_tips": "Aktiver denne entiteten for å dynamisk justere beste pris-deteksjon via automatiseringer, f.eks. høyere fleksibilitet for kritiske laster eller strengere krav for fleksible apparater."
|
||||||
|
},
|
||||||
|
"best_price_min_distance_override": {
|
||||||
|
"description": "Minimum prosentavstand under daglig gjennomsnitt. Intervaller må være så langt under gjennomsnittet for å kvalifisere som 'beste pris'. Hjelper med å skille ekte lavprisperioder fra gjennomsnittspriser.",
|
||||||
|
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Minimumsavstand'-innstillingen fra alternativer-dialogen for beste pris-periodeberegninger.",
|
||||||
|
"usage_tips": "Øk verdien for strengere beste pris-kriterier. Reduser hvis for få perioder blir oppdaget."
|
||||||
|
},
|
||||||
|
"best_price_min_period_length_override": {
|
||||||
|
"description": "Minimum periodelengde i 15-minutters intervaller. Perioder kortere enn dette blir ikke rapportert. Eksempel: 2 = minimum 30 minutter.",
|
||||||
|
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Minimum periodelengde'-innstillingen fra alternativer-dialogen for beste pris-periodeberegninger.",
|
||||||
|
"usage_tips": "Juster til typisk apparatkjøretid: 2 (30 min) for hurtigprogrammer, 4-8 (1-2 timer) for normale sykluser, 8+ for lange ECO-programmer."
|
||||||
|
},
|
||||||
|
"best_price_min_periods_override": {
|
||||||
|
"description": "Minimum antall beste pris-perioder å finne daglig. Når lemping er aktivert, vil systemet automatisk justere kriterier for å oppnå dette antallet.",
|
||||||
|
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Minimum perioder'-innstillingen fra alternativer-dialogen for beste pris-periodeberegninger.",
|
||||||
|
"usage_tips": "Sett dette til antall tidskritiske oppgaver du har daglig. Eksempel: 2 for to vaskemaskinkjøringer."
|
||||||
|
},
|
||||||
|
"best_price_relaxation_attempts_override": {
|
||||||
|
"description": "Antall forsøk på å gradvis lempe kriteriene for å oppnå minimum periodeantall. Hvert forsøk øker fleksibiliteten med 3 prosent. Ved 0 brukes kun basiskriterier.",
|
||||||
|
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Lemping forsøk'-innstillingen fra alternativer-dialogen for beste pris-periodeberegninger.",
|
||||||
|
"usage_tips": "Høyere verdier gjør periodedeteksjon mer adaptiv for dager med stabile priser. Sett til 0 for å tvinge strenge kriterier uten lemping."
|
||||||
|
},
|
||||||
|
"best_price_gap_count_override": {
|
||||||
|
"description": "Maksimalt antall dyrere intervaller som kan tillates mellom billige intervaller mens de fortsatt regnes som en sammenhengende periode. Ved 0 må billige intervaller være påfølgende.",
|
||||||
|
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Gaptoleranse'-innstillingen fra alternativer-dialogen for beste pris-periodeberegninger.",
|
||||||
|
"usage_tips": "Øk dette for apparater med variabel last (f.eks. varmepumper) som kan tåle korte dyrere intervaller. Sett til 0 for kontinuerlige billige perioder."
|
||||||
|
},
|
||||||
|
"peak_price_flex_override": {
|
||||||
|
"description": "Maksimal prosent under daglig maksimumspris som intervaller kan ha og fortsatt kvalifisere som 'topppris'. Samme anbefalinger som for beste pris-fleksibilitet.",
|
||||||
|
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Fleksibilitet'-innstillingen fra alternativer-dialogen for topppris-periodeberegninger.",
|
||||||
|
"usage_tips": "Bruk dette for å justere topppris-terskelen ved kjøretid for automatiseringer som unngår forbruk under dyre timer."
|
||||||
|
},
|
||||||
|
"peak_price_min_distance_override": {
|
||||||
|
"description": "Minimum prosentavstand over daglig gjennomsnitt. Intervaller må være så langt over gjennomsnittet for å kvalifisere som 'topppris'.",
|
||||||
|
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Minimumsavstand'-innstillingen fra alternativer-dialogen for topppris-periodeberegninger.",
|
||||||
|
"usage_tips": "Øk verdien for kun å fange ekstreme pristopper. Reduser for å inkludere flere høypristider."
|
||||||
|
},
|
||||||
|
"peak_price_min_period_length_override": {
|
||||||
|
"description": "Minimum periodelengde i 15-minutters intervaller for topppriser. Kortere pristopper rapporteres ikke som perioder.",
|
||||||
|
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Minimum periodelengde'-innstillingen fra alternativer-dialogen for topppris-periodeberegninger.",
|
||||||
|
"usage_tips": "Kortere verdier fanger korte pristopper. Lengre verdier fokuserer på vedvarende høyprisperioder."
|
||||||
|
},
|
||||||
|
"peak_price_min_periods_override": {
|
||||||
|
"description": "Minimum antall topppris-perioder å finne daglig.",
|
||||||
|
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Minimum perioder'-innstillingen fra alternativer-dialogen for topppris-periodeberegninger.",
|
||||||
|
"usage_tips": "Sett dette basert på hvor mange høyprisperioder du vil fange per dag for automatiseringer."
|
||||||
|
},
|
||||||
|
"peak_price_relaxation_attempts_override": {
|
||||||
|
"description": "Antall forsøk på å lempe kriteriene for å oppnå minimum antall topppris-perioder.",
|
||||||
|
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Lemping forsøk'-innstillingen fra alternativer-dialogen for topppris-periodeberegninger.",
|
||||||
|
"usage_tips": "Øk dette hvis ingen perioder blir funnet på dager med stabile priser. Sett til 0 for å tvinge strenge kriterier."
|
||||||
|
},
|
||||||
|
"peak_price_gap_count_override": {
|
||||||
|
"description": "Maksimalt antall billigere intervaller som kan tillates mellom dyre intervaller mens de fortsatt regnes som en topppris-periode.",
|
||||||
|
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Gaptoleranse'-innstillingen fra alternativer-dialogen for topppris-periodeberegninger.",
|
||||||
|
"usage_tips": "Høyere verdier fanger lengre høyprisperioder selv med korte prisdykk. Sett til 0 for strengt sammenhengende topppriser."
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"switch": {
|
||||||
|
"best_price_enable_relaxation_override": {
|
||||||
|
"description": "Når aktivert, lempes kriteriene automatisk for å oppnå minimum periodeantall. Når deaktivert, rapporteres kun perioder som oppfyller strenge kriterier (muligens null perioder på dager med stabile priser).",
|
||||||
|
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Oppnå minimumsantall'-innstillingen fra alternativer-dialogen for beste pris-periodeberegninger.",
|
||||||
|
"usage_tips": "Aktiver dette for garanterte daglige automatiseringsmuligheter. Deaktiver hvis du kun vil ha virkelig billige perioder, selv om det betyr ingen perioder på noen dager."
|
||||||
|
},
|
||||||
|
"peak_price_enable_relaxation_override": {
|
||||||
|
"description": "Når aktivert, lempes kriteriene automatisk for å oppnå minimum periodeantall. Når deaktivert, rapporteres kun ekte pristopper.",
|
||||||
|
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Oppnå minimumsantall'-innstillingen fra alternativer-dialogen for topppris-periodeberegninger.",
|
||||||
|
"usage_tips": "Aktiver dette for konsistente topppris-varsler. Deaktiver for kun å fange ekstreme pristopper."
|
||||||
|
}
|
||||||
|
},
|
||||||
"home_types": {
|
"home_types": {
|
||||||
"APARTMENT": "Leilighet",
|
"APARTMENT": "Leilighet",
|
||||||
"ROWHOUSE": "Rekkehus",
|
"ROWHOUSE": "Rekkehus",
|
||||||
|
|
|
||||||
|
|
@ -2,7 +2,9 @@
|
||||||
"apexcharts": {
|
"apexcharts": {
|
||||||
"title_rating_level": "Prijsfasen dagverloop",
|
"title_rating_level": "Prijsfasen dagverloop",
|
||||||
"title_level": "Prijsniveau",
|
"title_level": "Prijsniveau",
|
||||||
|
"hourly_suffix": "(Ø per uur)",
|
||||||
"best_price_period_name": "Beste prijsperiode",
|
"best_price_period_name": "Beste prijsperiode",
|
||||||
|
"peak_price_period_name": "Piekprijsperiode",
|
||||||
"notification": {
|
"notification": {
|
||||||
"metadata_sensor_unavailable": {
|
"metadata_sensor_unavailable": {
|
||||||
"title": "Tibber Prices: ApexCharts YAML gegenereerd met beperkte functionaliteit",
|
"title": "Tibber Prices: ApexCharts YAML gegenereerd met beperkte functionaliteit",
|
||||||
|
|
@ -290,24 +292,24 @@
|
||||||
"long_description": "Toont het tijdstempel van het laatst beschikbare prijsgegevensinterval van je Tibber-abonnement"
|
"long_description": "Toont het tijdstempel van het laatst beschikbare prijsgegevensinterval van je Tibber-abonnement"
|
||||||
},
|
},
|
||||||
"today_volatility": {
|
"today_volatility": {
|
||||||
"description": "Prijsvolatiliteitsclassificatie voor vandaag",
|
"description": "Hoeveel de stroomprijzen vandaag schommelen",
|
||||||
"long_description": "Toont hoeveel elektriciteitsprijzen variëren gedurende vandaag op basis van de spreiding (verschil tussen hoogste en laagste prijs). Classificatie: laag = spreiding < 5ct, matig = 5-15ct, hoog = 15-30ct, zeer hoog = >30ct.",
|
"long_description": "Geeft aan of de prijzen vandaag stabiel blijven of grote schommelingen hebben. Lage volatiliteit betekent vrij constante prijzen – timing maakt weinig uit. Hoge volatiliteit betekent duidelijke prijsverschillen gedurende de dag – goede kans om verbruik naar goedkopere periodes te verschuiven. `price_coefficient_variation_%` toont het percentage, `price_spread` de absolute prijsspanne.",
|
||||||
"usage_tips": "Gebruik dit om te bepalen of prijsgebaseerde optimalisatie de moeite waard is. Bijvoorbeeld, met een balkonbatterij met 15% efficiëntieverlies is optimalisatie alleen zinvol wanneer volatiliteit ten minste matig is. Maak automatiseringen die volatiliteit controleren voordat je laad-/ontlaadcycli plant."
|
"usage_tips": "Gebruik dit om te beslissen of optimaliseren de moeite waard is. Bij lage volatiliteit kun je apparaten op elk moment laten draaien. Bij hoge volatiliteit bespaar je merkbaar door Best Price-periodes te volgen."
|
||||||
},
|
},
|
||||||
"tomorrow_volatility": {
|
"tomorrow_volatility": {
|
||||||
"description": "Prijsvolatiliteitsclassificatie voor morgen",
|
"description": "Hoeveel de stroomprijzen morgen zullen schommelen",
|
||||||
"long_description": "Toont hoeveel elektriciteitsprijzen zullen variëren gedurende morgen op basis van de spreiding (verschil tussen hoogste en laagste prijs). Wordt onbeschikbaar totdat de gegevens van morgen zijn gepubliceerd (meestal 13:00-14:00 CET).",
|
"long_description": "Geeft aan of de prijzen morgen stabiel blijven of grote schommelingen hebben. Beschikbaar zodra de gegevens voor morgen zijn gepubliceerd (meestal 13:00–14:00 CET). Lage volatiliteit betekent vrij constante prijzen – timing is niet kritisch. Hoge volatiliteit betekent duidelijke prijsverschillen gedurende de dag – goede kans om energie-intensieve taken te plannen. `price_coefficient_variation_%` toont het percentage, `price_spread` de absolute prijsspanne.",
|
||||||
"usage_tips": "Gebruik dit voor vooruitplanning van het energieverbruik van morgen. Als morgen hoog of zeer hoog volatiliteit heeft, is het de moeite waard om de timing van energieverbruik te optimaliseren. Bij laag kun je apparaten op elk moment gebruiken zonder significante kostenverschillen."
|
"usage_tips": "Gebruik dit om het verbruik van morgen te plannen. Hoge volatiliteit? Plan flexibele lasten in Best Price-periodes. Lage volatiliteit? Laat apparaten draaien wanneer het jou uitkomt."
|
||||||
},
|
},
|
||||||
"next_24h_volatility": {
|
"next_24h_volatility": {
|
||||||
"description": "Prijsvolatiliteitsclassificatie voor de rollende volgende 24 uur",
|
"description": "Hoeveel de prijzen de komende 24 uur zullen schommelen",
|
||||||
"long_description": "Toont hoeveel elektriciteitsprijzen variëren in de volgende 24 uur vanaf nu (rollend venster). Dit overschrijdt daggrenzen en wordt elke 15 minuten bijgewerkt, wat een vooruitkijkende volatiliteitsbeoordeling biedt onafhankelijk van kalenderdagen.",
|
"long_description": "Geeft de prijsvolatiliteit aan voor een rollend 24-uursvenster vanaf nu (wordt elke 15 minuten bijgewerkt). Lage volatiliteit betekent vrij constante prijzen. Hoge volatiliteit betekent merkbare prijsschommelingen en dus optimalisatiemogelijkheden. In tegenstelling tot vandaag/morgen-sensoren overschrijdt deze daggrenzen en geeft een doorlopende vooruitblik. `price_coefficient_variation_%` toont het percentage, `price_spread` de absolute prijsspanne.",
|
||||||
"usage_tips": "Beste sensor voor realtime optimalisatiebeslissingen. In tegenstelling tot vandaag/morgen-sensoren die om middernacht wisselen, biedt deze een continue 24-uurs volatiliteitsbeoordeling. Gebruik voor batterijlaadstrategieën die over daggrenzen heen gaan."
|
"usage_tips": "Het beste voor beslissingen in real-time. Gebruik bij het plannen van batterijladen of andere flexibele lasten die over middernacht kunnen lopen. Biedt een consistent 24-uurs beeld, los van de kalenderdag."
|
||||||
},
|
},
|
||||||
"today_tomorrow_volatility": {
|
"today_tomorrow_volatility": {
|
||||||
"description": "Gecombineerde prijsvolatiliteitsclassificatie voor vandaag en morgen",
|
"description": "Gecombineerde prijsvolatiliteit voor vandaag en morgen",
|
||||||
"long_description": "Toont volatiliteit over zowel vandaag als morgen gecombineerd (wanneer de gegevens van morgen beschikbaar zijn). Biedt een uitgebreid overzicht van prijsvariatie over maximaal 48 uur. Valt terug op alleen vandaag wanneer de gegevens van morgen nog niet beschikbaar zijn.",
|
"long_description": "Geeft de totale volatiliteit weer wanneer vandaag en morgen samen worden bekeken (zodra morgengegevens beschikbaar zijn). Toont of er duidelijke prijsverschillen over de daggrens heen zijn. Valt terug naar alleen vandaag als morgengegevens ontbreken. Handig voor meerdaagse optimalisatie. `price_coefficient_variation_%` toont het percentage, `price_spread` de absolute prijsspanne.",
|
||||||
"usage_tips": "Gebruik dit voor meerdaagse planning en om te begrijpen of prijskansen bestaan over de daggrenzen heen. De attributen 'today_volatility' en 'tomorrow_volatility' tonen individuele dagbijdragen. Handig voor het plannen van laadsessies die middernacht kunnen overschrijden."
|
"usage_tips": "Gebruik voor taken die meerdere dagen beslaan. Kijk of de prijsverschillen groot genoeg zijn om plannen op te baseren. De afzonderlijke dag-sensoren tonen per-dag bijdragen als je meer detail wilt."
|
||||||
},
|
},
|
||||||
"data_lifecycle_status": {
|
"data_lifecycle_status": {
|
||||||
"description": "Huidige status van prijsgegevenslevenscyclus en caching",
|
"description": "Huidige status van prijsgegevenslevenscyclus en caching",
|
||||||
|
|
@ -315,39 +317,49 @@
|
||||||
"usage_tips": "Gebruik deze diagnostische sensor om gegevensfrisheid en API-aanroeppatronen te begrijpen. Controleer het 'cache_age'-attribuut om te zien hoe oud de huidige gegevens zijn. Monitor 'next_api_poll' om te weten wanneer de volgende update is gepland. Gebruik 'data_completeness' om te zien of gisteren/vandaag/morgen gegevens beschikbaar zijn. De 'api_calls_today'-teller helpt API-gebruik bij te houden. Perfect voor probleemoplossing of begrip van integratiegedrag."
|
"usage_tips": "Gebruik deze diagnostische sensor om gegevensfrisheid en API-aanroeppatronen te begrijpen. Controleer het 'cache_age'-attribuut om te zien hoe oud de huidige gegevens zijn. Monitor 'next_api_poll' om te weten wanneer de volgende update is gepland. Gebruik 'data_completeness' om te zien of gisteren/vandaag/morgen gegevens beschikbaar zijn. De 'api_calls_today'-teller helpt API-gebruik bij te houden. Perfect voor probleemoplossing of begrip van integratiegedrag."
|
||||||
},
|
},
|
||||||
"best_price_end_time": {
|
"best_price_end_time": {
|
||||||
"description": "Wanneer de huidige of volgende goedkope periode eindigt",
|
"description": "Totale lengte van huidige of volgende voordelige periode (state in uren, attribuut in minuten)",
|
||||||
"long_description": "Toont het eindtijdstempel van de huidige goedkope periode wanneer actief, of het einde van de volgende periode wanneer geen periode actief is. Toont altijd een nuttige tijdreferentie voor planning. Geeft alleen 'Onbekend' terug wanneer geen periodes zijn geconfigureerd.",
|
"long_description": "Toont hoe lang de voordelige periode duurt. State gebruikt uren (float) voor een leesbare UI; attribuut `period_duration_minutes` behoudt afgeronde minuten voor automatiseringen. Actief → duur van de huidige periode, anders de volgende.",
|
||||||
"usage_tips": "Gebruik dit om een aftelling weer te geven zoals 'Goedkope periode eindigt over 2 uur' (wanneer actief) of 'Volgende goedkope periode eindigt om 14:00' (wanneer inactief). Home Assistant toont automatisch relatieve tijd voor tijdstempelsensoren."
|
"usage_tips": "UI kan 1,5 u tonen terwijl `period_duration_minutes` = 90 voor automatiseringen blijft."
|
||||||
|
},
|
||||||
|
"best_price_period_duration": {
|
||||||
|
"description": "Lengte van huidige/volgende goedkope periode",
|
||||||
|
"long_description": "Totale duur van huidige of volgende goedkope periode. De state wordt weergegeven in uren (bijv. 1,5 u) voor gemakkelijk aflezen in de UI, terwijl het attribuut `period_duration_minutes` dezelfde waarde in minuten levert (bijv. 90) voor automatiseringen. Deze waarde vertegenwoordigt de **volledige geplande duur** van de periode en is constant gedurende de gehele periode, zelfs als de resterende tijd (remaining_minutes) afneemt.",
|
||||||
|
"usage_tips": "Combineer met remaining_minutes om te berekenen wanneer langlopende apparaten moeten worden gestopt: Periode is `period_duration_minutes - remaining_minutes` minuten geleden gestart. Dit attribuut ondersteunt energie-optimalisatiestrategieën door te helpen bij het plannen van hoog-verbruiksactiviteiten binnen goedkope periodes."
|
||||||
},
|
},
|
||||||
"best_price_remaining_minutes": {
|
"best_price_remaining_minutes": {
|
||||||
"description": "Resterende minuten in huidige goedkope periode (0 wanneer inactief)",
|
"description": "Resterende tijd in huidige goedkope periode",
|
||||||
"long_description": "Toont hoeveel minuten er nog over zijn in de huidige goedkope periode. Geeft 0 terug wanneer geen periode actief is. Werkt elke minuut bij. Controleer binary_sensor.best_price_period om te zien of een periode momenteel actief is.",
|
"long_description": "Toont hoeveel tijd er nog overblijft in de huidige goedkope periode. De state wordt weergegeven in uren (bijv. 0,75 u) voor gemakkelijk aflezen in dashboards, terwijl het attribuut `remaining_minutes` dezelfde tijd in minuten levert (bijv. 45) voor automatiseringsvoorwaarden. **Afteltimer**: Deze waarde neemt elke minuut af tijdens een actieve periode. Geeft 0 terug wanneer geen goedkope periode actief is. Werkt elke minuut bij.",
|
||||||
"usage_tips": "Perfect voor automatiseringen: 'Als remaining_minutes > 0 EN remaining_minutes < 30, start wasmachine nu'. De waarde 0 maakt het gemakkelijk om te controleren of een periode actief is (waarde > 0) of niet (waarde = 0)."
|
"usage_tips": "Voor automatiseringen: Gebruik attribuut `remaining_minutes` zoals 'Als remaining_minutes > 60, start vaatwasser nu (genoeg tijd om te voltooien)' of 'Als remaining_minutes < 15, rond huidige cyclus binnenkort af'. UI toont gebruiksvriendelijke uren (bijv. 1,25 u). Waarde 0 geeft aan dat geen goedkope periode actief is."
|
||||||
},
|
},
|
||||||
"best_price_progress": {
|
"best_price_progress": {
|
||||||
"description": "Voortgang door huidige goedkope periode (0% wanneer inactief)",
|
"description": "Voortgang door huidige goedkope periode (0% wanneer inactief)",
|
||||||
"long_description": "Toont de voortgang door de huidige goedkope periode als 0-100%. Geeft 0% terug wanneer geen periode actief is. Werkt elke minuut bij. 0% betekent periode net gestart, 100% betekent het eindigt bijna.",
|
"long_description": "Toont voortgang door de huidige goedkope periode als 0-100%. Geeft 0% terug wanneer geen periode actief is. Werkt elke minuut bij. 0% betekent periode net gestart, 100% betekent dat deze bijna eindigt.",
|
||||||
"usage_tips": "Geweldig voor visuele voortgangsbalken. Gebruik in automatiseringen: 'Als progress > 0 EN progress > 75, stuur melding dat goedkope periode bijna eindigt'. Waarde 0 geeft aan dat er geen actieve periode is."
|
"usage_tips": "Geweldig voor visuele voortgangsbalken. Gebruik in automatiseringen: 'Als progress > 0 EN progress > 75, stuur melding dat goedkope periode bijna eindigt'. Waarde 0 geeft aan dat geen periode actief is."
|
||||||
},
|
},
|
||||||
"best_price_next_start_time": {
|
"best_price_next_start_time": {
|
||||||
"description": "Wanneer de volgende goedkope periode begint",
|
"description": "Totale lengte van huidige of volgende dure periode (state in uren, attribuut in minuten)",
|
||||||
"long_description": "Toont wanneer de volgende komende goedkope periode begint. Tijdens een actieve periode toont dit de start van de VOLGENDE periode na de huidige. Geeft alleen 'Onbekend' terug wanneer geen toekomstige periodes zijn geconfigureerd.",
|
"long_description": "Toont hoe lang de dure periode duurt. State gebruikt uren (float) voor de UI; attribuut `period_duration_minutes` behoudt afgeronde minuten voor automatiseringen. Actief → duur van de huidige periode, anders de volgende.",
|
||||||
"usage_tips": "Altijd nuttig voor vooruitplanning: 'Volgende goedkope periode begint over 3 uur' (of je nu in een periode zit of niet). Combineer met automatiseringen: 'Wanneer volgende starttijd over 10 minuten is, stuur melding om wasmachine voor te bereiden'."
|
"usage_tips": "UI kan 0,75 u tonen terwijl `period_duration_minutes` = 45 voor automatiseringen blijft."
|
||||||
},
|
},
|
||||||
"best_price_next_in_minutes": {
|
"best_price_next_in_minutes": {
|
||||||
"description": "Minuten tot volgende goedkope periode begint (0 bij overgang)",
|
"description": "Resterende tijd in huidige dure periode (state in uren, attribuut in minuten)",
|
||||||
"long_description": "Toont minuten tot de volgende goedkope periode begint. Tijdens een actieve periode toont dit de tijd tot de periode NA de huidige. Geeft 0 terug tijdens korte overgangsmomenten. Werkt elke minuut bij.",
|
"long_description": "Toont hoeveel tijd er nog over is. State gebruikt uren (float); attribuut `remaining_minutes` behoudt afgeronde minuten voor automatiseringen. Geeft 0 terug wanneer er geen periode actief is. Werkt elke minuut bij.",
|
||||||
"usage_tips": "Perfect voor 'wacht tot goedkope periode' automatiseringen: 'Als next_in_minutes > 0 EN next_in_minutes < 15, wacht voordat vaatwasser wordt gestart'. Waarde > 0 geeft altijd aan dat een toekomstige periode is gepland."
|
"usage_tips": "Gebruik `remaining_minutes` voor drempels (bijv. > 60) terwijl de state in uren goed leesbaar blijft."
|
||||||
},
|
},
|
||||||
"peak_price_end_time": {
|
"peak_price_end_time": {
|
||||||
"description": "Wanneer de huidige of volgende dure periode eindigt",
|
"description": "Tijd tot volgende dure periode (state in uren, attribuut in minuten)",
|
||||||
"long_description": "Toont het eindtijdstempel van de huidige dure periode wanneer actief, of het einde van de volgende periode wanneer geen periode actief is. Toont altijd een nuttige tijdreferentie voor planning. Geeft alleen 'Onbekend' terug wanneer geen periodes zijn geconfigureerd.",
|
"long_description": "Toont hoe lang het duurt tot de volgende dure periode start. State gebruikt uren (float); attribuut `next_in_minutes` behoudt afgeronde minuten voor automatiseringen. Tijdens een actieve periode is dit de tijd tot de periode na de huidige. 0 tijdens korte overgangen. Werkt elke minuut bij.",
|
||||||
"usage_tips": "Gebruik dit om 'Dure periode eindigt over 1 uur' weer te geven (wanneer actief) of 'Volgende dure periode eindigt om 18:00' (wanneer inactief). Combineer met automatiseringen om activiteiten te hervatten na piek."
|
"usage_tips": "Gebruik `next_in_minutes` in automatiseringen (bijv. < 10) terwijl de state in uren leesbaar blijft."
|
||||||
|
},
|
||||||
|
"peak_price_period_duration": {
|
||||||
|
"description": "Totale duur van huidige of volgende dure periode in minuten",
|
||||||
|
"long_description": "Toont de totale duur van de dure periode in minuten. Tijdens een actieve periode toont dit de volledige lengte van de huidige periode. Wanneer geen periode actief is, toont dit de duur van de volgende komende periode. Voorbeeld: '60 minuten' voor een 1-uur periode.",
|
||||||
|
"usage_tips": "Gebruik om energiebesparende maatregelen te plannen: 'Als duration > 120, verlaag verwarmingstemperatuur agressiever (lange dure periode)'. Helpt bij het inschatten hoeveel energieverbruik moet worden verminderd."
|
||||||
},
|
},
|
||||||
"peak_price_remaining_minutes": {
|
"peak_price_remaining_minutes": {
|
||||||
"description": "Resterende minuten in huidige dure periode (0 wanneer inactief)",
|
"description": "Resterende tijd in huidige dure periode",
|
||||||
"long_description": "Toont hoeveel minuten er nog over zijn in de huidige dure periode. Geeft 0 terug wanneer geen periode actief is. Werkt elke minuut bij. Controleer binary_sensor.peak_price_period om te zien of een periode momenteel actief is.",
|
"long_description": "Toont hoeveel tijd er nog overblijft in de huidige dure periode. De state wordt weergegeven in uren (bijv. 0,75 u) voor gemakkelijk aflezen in dashboards, terwijl het attribuut `remaining_minutes` dezelfde tijd in minuten levert (bijv. 45) voor automatiseringsvoorwaarden. **Afteltimer**: Deze waarde neemt elke minuut af tijdens een actieve periode. Geeft 0 terug wanneer geen dure periode actief is. Werkt elke minuut bij.",
|
||||||
"usage_tips": "Gebruik in automatiseringen: 'Als remaining_minutes > 60, annuleer uitgestelde laadronde'. Waarde 0 maakt het gemakkelijk om onderscheid te maken tussen actieve (waarde > 0) en inactieve (waarde = 0) periodes."
|
"usage_tips": "Voor automatiseringen: Gebruik attribuut `remaining_minutes` zoals 'Als remaining_minutes > 60, annuleer uitgestelde laadronde' of 'Als remaining_minutes < 15, hervat normaal gebruik binnenkort'. UI toont gebruiksvriendelijke uren (bijv. 1,0 u). Waarde 0 geeft aan dat geen dure periode actief is."
|
||||||
},
|
},
|
||||||
"peak_price_progress": {
|
"peak_price_progress": {
|
||||||
"description": "Voortgang door huidige dure periode (0% wanneer inactief)",
|
"description": "Voortgang door huidige dure periode (0% wanneer inactief)",
|
||||||
|
|
@ -360,19 +372,9 @@
|
||||||
"usage_tips": "Altijd nuttig voor planning: 'Volgende dure periode begint over 2 uur'. Automatisering: 'Wanneer volgende starttijd over 30 minuten is, verlaag verwarmingstemperatuur preventief'."
|
"usage_tips": "Altijd nuttig voor planning: 'Volgende dure periode begint over 2 uur'. Automatisering: 'Wanneer volgende starttijd over 30 minuten is, verlaag verwarmingstemperatuur preventief'."
|
||||||
},
|
},
|
||||||
"peak_price_next_in_minutes": {
|
"peak_price_next_in_minutes": {
|
||||||
"description": "Minuten tot volgende dure periode begint (0 bij overgang)",
|
"description": "Tijd tot volgende dure periode",
|
||||||
"long_description": "Toont minuten tot de volgende dure periode begint. Tijdens een actieve periode toont dit de tijd tot de periode NA de huidige. Geeft 0 terug tijdens korte overgangsmomenten. Werkt elke minuut bij.",
|
"long_description": "Toont hoe lang het duurt tot de volgende dure periode. De state wordt weergegeven in uren (bijv. 0,5 u) voor dashboards, terwijl het attribuut `next_in_minutes` minuten levert (bijv. 30) voor automatiseringsvoorwaarden. Tijdens een actieve periode toont dit de tijd tot de periode NA de huidige. Geeft 0 terug tijdens korte overgangsmomenten. Werkt elke minuut bij.",
|
||||||
"usage_tips": "Preventieve automatisering: 'Als next_in_minutes > 0 EN next_in_minutes < 10, voltooi huidige laadcyclus nu voordat prijzen stijgen'."
|
"usage_tips": "Voor automatiseringen: Gebruik attribuut `next_in_minutes` zoals 'Als next_in_minutes > 0 EN next_in_minutes < 10, voltooi huidige laadcyclus nu voordat prijzen stijgen'. Waarde > 0 geeft altijd aan dat een toekomstige dure periode is gepland."
|
||||||
},
|
|
||||||
"best_price_period_duration": {
|
|
||||||
"description": "Totale duur van huidige of volgende goedkope periode in minuten",
|
|
||||||
"long_description": "Toont de totale duur van de goedkope periode in minuten. Tijdens een actieve periode toont dit de volledige lengte van de huidige periode. Wanneer geen periode actief is, toont dit de duur van de volgende komende periode. Voorbeeld: '90 minuten' voor een 1,5-uur periode.",
|
|
||||||
"usage_tips": "Combineer met remaining_minutes voor taakplanning: 'Als duration = 120 EN remaining_minutes > 90, start wasmachine (genoeg tijd om te voltooien)'. Nuttig om te begrijpen of periodes lang genoeg zijn voor energie-intensieve taken."
|
|
||||||
},
|
|
||||||
"peak_price_period_duration": {
|
|
||||||
"description": "Totale duur van huidige of volgende dure periode in minuten",
|
|
||||||
"long_description": "Toont de totale duur van de dure periode in minuten. Tijdens een actieve periode toont dit de volledige lengte van de huidige periode. Wanneer geen periode actief is, toont dit de duur van de volgende komende periode. Voorbeeld: '60 minuten' voor een 1-uur periode.",
|
|
||||||
"usage_tips": "Gebruik om energiebesparende maatregelen te plannen: 'Als duration > 120, verlaag verwarmingstemperatuur agressiever (lange dure periode)'. Helpt bij het inschatten hoeveel energieverbruik moet worden verminderd."
|
|
||||||
},
|
},
|
||||||
"home_type": {
|
"home_type": {
|
||||||
"description": "Type woning (appartement, huis enz.)",
|
"description": "Type woning (appartement, huis enz.)",
|
||||||
|
|
@ -487,6 +489,80 @@
|
||||||
"usage_tips": "Gebruik dit om te verifiëren dat realtimeverbruiksgegevens beschikbaar zijn. Schakel meldingen in als dit onverwacht verandert naar 'uit', wat wijst op mogelijke hardware- of verbindingsproblemen."
|
"usage_tips": "Gebruik dit om te verifiëren dat realtimeverbruiksgegevens beschikbaar zijn. Schakel meldingen in als dit onverwacht verandert naar 'uit', wat wijst op mogelijke hardware- of verbindingsproblemen."
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"number": {
|
||||||
|
"best_price_flex_override": {
|
||||||
|
"description": "Maximaal percentage boven de dagelijkse minimumprijs dat intervallen kunnen hebben en nog steeds als 'beste prijs' kwalificeren. Aanbevolen: 15-20 met versoepeling ingeschakeld (standaard), of 25-35 zonder versoepeling. Maximum: 50 (harde limiet voor betrouwbare periodedetectie).",
|
||||||
|
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Flexibiliteit'-instelling uit de opties-dialoog voor beste prijs-periodeberekeningen.",
|
||||||
|
"usage_tips": "Schakel deze entiteit in om beste prijs-detectie dynamisch aan te passen via automatiseringen, bijv. hogere flexibiliteit voor kritieke lasten of strengere eisen voor flexibele apparaten."
|
||||||
|
},
|
||||||
|
"best_price_min_distance_override": {
|
||||||
|
"description": "Minimale procentuele afstand onder het daggemiddelde. Intervallen moeten zo ver onder het gemiddelde liggen om als 'beste prijs' te kwalificeren. Helpt echte lage prijsperioden te onderscheiden van gemiddelde prijzen.",
|
||||||
|
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimale afstand'-instelling uit de opties-dialoog voor beste prijs-periodeberekeningen.",
|
||||||
|
"usage_tips": "Verhoog de waarde voor strengere beste prijs-criteria. Verlaag als te weinig perioden worden gedetecteerd."
|
||||||
|
},
|
||||||
|
"best_price_min_period_length_override": {
|
||||||
|
"description": "Minimale periodelengte in 15-minuten intervallen. Perioden korter dan dit worden niet gerapporteerd. Voorbeeld: 2 = minimaal 30 minuten.",
|
||||||
|
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimale periodelengte'-instelling uit de opties-dialoog voor beste prijs-periodeberekeningen.",
|
||||||
|
"usage_tips": "Pas aan op typische apparaatlooptijd: 2 (30 min) voor snelle programma's, 4-8 (1-2 uur) voor normale cycli, 8+ voor lange ECO-programma's."
|
||||||
|
},
|
||||||
|
"best_price_min_periods_override": {
|
||||||
|
"description": "Minimum aantal beste prijs-perioden om dagelijks te vinden. Wanneer versoepeling is ingeschakeld, past het systeem automatisch de criteria aan om dit aantal te bereiken.",
|
||||||
|
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimum periodes'-instelling uit de opties-dialoog voor beste prijs-periodeberekeningen.",
|
||||||
|
"usage_tips": "Stel dit in op het aantal tijdkritieke taken dat je dagelijks hebt. Voorbeeld: 2 voor twee wasladingen."
|
||||||
|
},
|
||||||
|
"best_price_relaxation_attempts_override": {
|
||||||
|
"description": "Aantal pogingen om de criteria geleidelijk te versoepelen om het minimum aantal perioden te bereiken. Elke poging verhoogt de flexibiliteit met 3 procent. Bij 0 worden alleen basiscriteria gebruikt.",
|
||||||
|
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Versoepeling pogingen'-instelling uit de opties-dialoog voor beste prijs-periodeberekeningen.",
|
||||||
|
"usage_tips": "Hogere waarden maken periodedetectie adaptiever voor dagen met stabiele prijzen. Stel in op 0 om strikte criteria af te dwingen zonder versoepeling."
|
||||||
|
},
|
||||||
|
"best_price_gap_count_override": {
|
||||||
|
"description": "Maximum aantal duurdere intervallen dat mag worden toegestaan tussen goedkope intervallen terwijl ze nog steeds als één aaneengesloten periode tellen. Bij 0 moeten goedkope intervallen opeenvolgend zijn.",
|
||||||
|
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Gap tolerantie'-instelling uit de opties-dialoog voor beste prijs-periodeberekeningen.",
|
||||||
|
"usage_tips": "Verhoog dit voor apparaten met variabele belasting (bijv. warmtepompen) die korte duurdere intervallen kunnen tolereren. Stel in op 0 voor continu goedkope perioden."
|
||||||
|
},
|
||||||
|
"peak_price_flex_override": {
|
||||||
|
"description": "Maximaal percentage onder de dagelijkse maximumprijs dat intervallen kunnen hebben en nog steeds als 'piekprijs' kwalificeren. Dezelfde aanbevelingen als voor beste prijs-flexibiliteit.",
|
||||||
|
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Flexibiliteit'-instelling uit de opties-dialoog voor piekprijs-periodeberekeningen.",
|
||||||
|
"usage_tips": "Gebruik dit om de piekprijs-drempel tijdens runtime aan te passen voor automatiseringen die verbruik tijdens dure uren vermijden."
|
||||||
|
},
|
||||||
|
"peak_price_min_distance_override": {
|
||||||
|
"description": "Minimale procentuele afstand boven het daggemiddelde. Intervallen moeten zo ver boven het gemiddelde liggen om als 'piekprijs' te kwalificeren.",
|
||||||
|
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimale afstand'-instelling uit de opties-dialoog voor piekprijs-periodeberekeningen.",
|
||||||
|
"usage_tips": "Verhoog de waarde om alleen extreme prijspieken te vangen. Verlaag om meer dure tijden mee te nemen."
|
||||||
|
},
|
||||||
|
"peak_price_min_period_length_override": {
|
||||||
|
"description": "Minimale periodelengte in 15-minuten intervallen voor piekprijzen. Kortere prijspieken worden niet als perioden gerapporteerd.",
|
||||||
|
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimale periodelengte'-instelling uit de opties-dialoog voor piekprijs-periodeberekeningen.",
|
||||||
|
"usage_tips": "Kortere waarden vangen korte prijspieken. Langere waarden focussen op aanhoudende dure perioden."
|
||||||
|
},
|
||||||
|
"peak_price_min_periods_override": {
|
||||||
|
"description": "Minimum aantal piekprijs-perioden om dagelijks te vinden.",
|
||||||
|
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimum periodes'-instelling uit de opties-dialoog voor piekprijs-periodeberekeningen.",
|
||||||
|
"usage_tips": "Stel dit in op basis van hoeveel dure perioden je per dag wilt vangen voor automatiseringen."
|
||||||
|
},
|
||||||
|
"peak_price_relaxation_attempts_override": {
|
||||||
|
"description": "Aantal pogingen om de criteria te versoepelen om het minimum aantal piekprijs-perioden te bereiken.",
|
||||||
|
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Versoepeling pogingen'-instelling uit de opties-dialoog voor piekprijs-periodeberekeningen.",
|
||||||
|
"usage_tips": "Verhoog dit als geen perioden worden gevonden op dagen met stabiele prijzen. Stel in op 0 om strikte criteria af te dwingen."
|
||||||
|
},
|
||||||
|
"peak_price_gap_count_override": {
|
||||||
|
"description": "Maximum aantal goedkopere intervallen dat mag worden toegestaan tussen dure intervallen terwijl ze nog steeds als één piekprijs-periode tellen.",
|
||||||
|
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Gap tolerantie'-instelling uit de opties-dialoog voor piekprijs-periodeberekeningen.",
|
||||||
|
"usage_tips": "Hogere waarden vangen langere dure perioden zelfs met korte prijsdips. Stel in op 0 voor strikt aaneengesloten piekprijzen."
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"switch": {
|
||||||
|
"best_price_enable_relaxation_override": {
|
||||||
|
"description": "Indien ingeschakeld, worden criteria automatisch versoepeld om het minimum aantal perioden te bereiken. Indien uitgeschakeld, worden alleen perioden gerapporteerd die aan strikte criteria voldoen (mogelijk nul perioden op dagen met stabiele prijzen).",
|
||||||
|
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimum aantal bereiken'-instelling uit de opties-dialoog voor beste prijs-periodeberekeningen.",
|
||||||
|
"usage_tips": "Schakel dit in voor gegarandeerde dagelijkse automatiseringsmogelijkheden. Schakel uit als je alleen echt goedkope perioden wilt, ook als dat betekent dat er op sommige dagen geen perioden zijn."
|
||||||
|
},
|
||||||
|
"peak_price_enable_relaxation_override": {
|
||||||
|
"description": "Indien ingeschakeld, worden criteria automatisch versoepeld om het minimum aantal perioden te bereiken. Indien uitgeschakeld, worden alleen echte prijspieken gerapporteerd.",
|
||||||
|
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimum aantal bereiken'-instelling uit de opties-dialoog voor piekprijs-periodeberekeningen.",
|
||||||
|
"usage_tips": "Schakel dit in voor consistente piekprijs-waarschuwingen. Schakel uit om alleen extreme prijspieken te vangen."
|
||||||
|
}
|
||||||
|
},
|
||||||
"home_types": {
|
"home_types": {
|
||||||
"APARTMENT": "Appartement",
|
"APARTMENT": "Appartement",
|
||||||
"ROWHOUSE": "Rijhuis",
|
"ROWHOUSE": "Rijhuis",
|
||||||
|
|
|
||||||
|
|
@ -2,7 +2,9 @@
|
||||||
"apexcharts": {
|
"apexcharts": {
|
||||||
"title_rating_level": "Prisfaser dagsprogress",
|
"title_rating_level": "Prisfaser dagsprogress",
|
||||||
"title_level": "Prisnivå",
|
"title_level": "Prisnivå",
|
||||||
|
"hourly_suffix": "(Ø per timme)",
|
||||||
"best_price_period_name": "Bästa prisperiod",
|
"best_price_period_name": "Bästa prisperiod",
|
||||||
|
"peak_price_period_name": "Toppprisperiod",
|
||||||
"notification": {
|
"notification": {
|
||||||
"metadata_sensor_unavailable": {
|
"metadata_sensor_unavailable": {
|
||||||
"title": "Tibber Prices: ApexCharts YAML genererad med begränsad funktionalitet",
|
"title": "Tibber Prices: ApexCharts YAML genererad med begränsad funktionalitet",
|
||||||
|
|
@ -290,64 +292,74 @@
|
||||||
"long_description": "Visar tidsstämpeln för det senaste tillgängliga prisdataintervallet från ditt Tibber-abonnemang"
|
"long_description": "Visar tidsstämpeln för det senaste tillgängliga prisdataintervallet från ditt Tibber-abonnemang"
|
||||||
},
|
},
|
||||||
"today_volatility": {
|
"today_volatility": {
|
||||||
"description": "Prisvolatilitetsklassificering för idag",
|
"description": "Hur mycket elpriserna varierar idag",
|
||||||
"long_description": "Visar hur mycket elpriserna varierar under dagen baserat på spridningen (skillnaden mellan högsta och lägsta pris). Klassificering: låg = spridning < 5 öre, måttlig = 5-15 öre, hög = 15-30 öre, mycket hög = >30 öre.",
|
"long_description": "Visar om dagens priser är stabila eller har stora svängningar. Låg volatilitet innebär ganska jämna priser – timing spelar liten roll. Hög volatilitet innebär tydliga prisskillnader under dagen – bra tillfälle att flytta förbrukning till billigare perioder. `price_coefficient_variation_%` visar procentvärdet, `price_spread` visar den absoluta prisspannet.",
|
||||||
"usage_tips": "Använd detta för att avgöra om prisbaserad optimering är värt besväret. Till exempel, med ett balkongbatteri som har 15% effektivitetsförlust är optimering endast meningsfull när volatiliteten är åtminstone måttlig. Skapa automationer som kontrollerar volatiliteten innan laddnings-/urladdningscykler planeras."
|
"usage_tips": "Använd detta för att avgöra om optimering är värt besväret. Vid låg volatilitet kan du köra enheter när som helst. Vid hög volatilitet sparar du märkbart genom att följa Best Price-perioder."
|
||||||
},
|
},
|
||||||
"tomorrow_volatility": {
|
"tomorrow_volatility": {
|
||||||
"description": "Prisvolatilitetsklassificering för imorgon",
|
"description": "Hur mycket elpriserna kommer att variera i morgon",
|
||||||
"long_description": "Visar hur mycket elpriserna kommer att variera under morgondagen baserat på spridningen (skillnaden mellan högsta och lägsta pris). Blir otillgänglig tills morgondagens data publiceras (vanligtvis 13:00-14:00 CET).",
|
"long_description": "Visar om priserna i morgon blir stabila eller får stora svängningar. Tillgänglig när morgondagens data är publicerad (vanligen 13:00–14:00 CET). Låg volatilitet innebär ganska jämna priser – timing är inte kritisk. Hög volatilitet innebär tydliga prisskillnader under dagen – bra läge att planera energikrävande uppgifter. `price_coefficient_variation_%` visar procentvärdet, `price_spread` visar den absoluta prisspannet.",
|
||||||
"usage_tips": "Använd detta för förhandsplanering av morgondagens energianvändning. Om morgondagen har hög eller mycket hög volatilitet är det värt att optimera energiförbrukningstiming. Vid låg volatilitet kan du köra enheter när som helst utan betydande kostnadsskillnader."
|
"usage_tips": "Använd för att planera morgondagens förbrukning. Hög volatilitet? Planera flexibla laster i Best Price-perioder. Låg volatilitet? Kör enheter när det passar dig."
|
||||||
},
|
},
|
||||||
"next_24h_volatility": {
|
"next_24h_volatility": {
|
||||||
"description": "Prisvolatilitetsklassificering för rullande nästa 24 timmar",
|
"description": "Hur mycket priserna varierar de kommande 24 timmarna",
|
||||||
"long_description": "Visar hur mycket elpriserna varierar under de nästa 24 timmarna från nu (rullande fönster). Detta korsar daggränser och uppdateras var 15:e minut, vilket ger en framåtblickande volatilitetsbedömning oberoende av kalenderdagar.",
|
"long_description": "Visar prisvolatilitet för ett rullande 24-timmarsfönster från nu (uppdateras var 15:e minut). Låg volatilitet innebär ganska jämna priser. Hög volatilitet innebär märkbara prissvängningar och därmed optimeringsmöjligheter. Till skillnad från idag/i morgon-sensorer korsar den här dagsgränser och ger en kontinuerlig framåtblickande bedömning. `price_coefficient_variation_%` visar procentvärdet, `price_spread` visar den absoluta prisspannet.",
|
||||||
"usage_tips": "Bästa sensorn för realtidsoptimeringsbeslut. Till skillnad från idag/imorgon-sensorer som växlar vid midnatt ger detta en kontinuerlig 24t volatilitetsbedömning. Använd för batteriladningsstrategier som sträcker sig över daggränser."
|
"usage_tips": "Bäst för beslut i realtid. Använd vid planering av batteriladdning eller andra flexibla laster som kan gå över midnatt. Ger en konsekvent 24h-bild oberoende av kalenderdag."
|
||||||
},
|
},
|
||||||
"today_tomorrow_volatility": {
|
"today_tomorrow_volatility": {
|
||||||
"description": "Kombinerad prisvolatilitetsklassificering för idag och imorgon",
|
"description": "Kombinerad prisvolatilitet för idag och imorgon",
|
||||||
"long_description": "Visar volatilitet över både idag och imorgon kombinerat (när morgondagens data är tillgänglig). Ger en utökad vy av prisvariation över upp till 48 timmar. Faller tillbaka till endast idag när morgondagens data inte är tillgänglig ännu.",
|
"long_description": "Visar den samlade volatiliteten när idag och imorgon ses tillsammans (när morgondatan finns). Visar om det finns tydliga prisskillnader över dagsgränsen. Faller tillbaka till endast idag om morgondatan saknas. Nyttig för flerdagarsoptimering. `price_coefficient_variation_%` visar procentvärdet, `price_spread` visar den absoluta prisspannet.",
|
||||||
"usage_tips": "Använd detta för flerdagarsplanering och för att förstå om prismöjligheter existerar över dagsgränsen. Attributen 'today_volatility' och 'tomorrow_volatility' visar individuella dagsbidrag. Användbart för planering av laddningssessioner som kan sträcka sig över midnatt."
|
"usage_tips": "Använd för uppgifter som sträcker sig över flera dagar. Kontrollera om prisskillnaderna är stora nog för att planera efter. De enskilda dag-sensorerna visar bidrag per dag om du behöver mer detaljer."
|
||||||
},
|
},
|
||||||
"data_lifecycle_status": {
|
"data_lifecycle_status": {
|
||||||
"description": "Aktuell status för prisdatalivscykel och cachning",
|
"description": "Aktuell status för prisdatalivscykel och cachning",
|
||||||
"long_description": "Visar om integrationen använder cachad data eller färsk data från API:et. Visar aktuell livscykelstatus: 'cached' (använder lagrad data), 'fresh' (nyss hämtad från API), 'refreshing' (hämtar för närvarande), 'searching_tomorrow' (söker aktivt efter morgondagens data efter 13:00), 'turnover_pending' (inom 15 minuter före midnatt, 23:45-00:00), eller 'error' (hämtning misslyckades). Inkluderar omfattande attribut som cache-ålder, nästa API-polling, datafullständighet och API-anropsstatistik.",
|
"long_description": "Visar om integrationen använder cachad data eller färsk data från API:et. Visar aktuell livscykelstatus: 'cached' (använder lagrad data), 'fresh' (nyss hämtad från API), 'refreshing' (hämtar för närvarande), 'searching_tomorrow' (söker aktivt efter morgondagens data efter 13:00), 'turnover_pending' (inom 15 minuter före midnatt, 23:45-00:00), eller 'error' (hämtning misslyckades). Inkluderar omfattande attribut som cache-ålder, nästa API-polling, datafullständighet och API-anropsstatistik.",
|
||||||
"usage_tips": "Använd denna diagnostiksensor för att förstå datafärskhet och API-anropsmönster. Kontrollera 'cache_age'-attributet för att se hur gammal den aktuella datan är. Övervaka 'next_api_poll' för att veta när nästa uppdatering är schemalagd. Använd 'data_completeness' för att se om data för igår/idag/imorgon är tillgänglig. Räknaren 'api_calls_today' hjälper till att spåra API-användning. Perfekt för felsökning eller förståelse av integrationens beteende."
|
"usage_tips": "Använd denna diagnostiksensor för att förstå datafärskhet och API-anropsmönster. Kontrollera 'cache_age'-attributet för att se hur gammal den aktuella datan är. Övervaka 'next_api_poll' för att veta när nästa uppdatering är schemalagd. Använd 'data_completeness' för att se om data för igår/idag/imorgon är tillgänglig. Räknaren 'api_calls_today' hjälper till att spåra API-användning. Perfekt för felsökning eller förståelse av integrationens beteende."
|
||||||
},
|
},
|
||||||
"best_price_end_time": {
|
"best_price_end_time": {
|
||||||
"description": "När nuvarande eller nästa billigperiod slutar",
|
"description": "Total längd för nuvarande eller nästa billigperiod (state i timmar, attribut i minuter)",
|
||||||
"long_description": "Visar sluttidsstämpeln för nuvarande billigperiod när aktiv, eller slutet av nästa period när ingen period är aktiv. Visar alltid en användbar tidsreferens för planering. Returnerar 'Okänt' endast när inga perioder är konfigurerade.",
|
"long_description": "Visar hur länge billigperioden varar. State använder timmar (decimal) för en läsbar UI; attributet `period_duration_minutes` behåller avrundade minuter för automationer. Aktiv → varaktighet för aktuell period, annars nästa.",
|
||||||
"usage_tips": "Använd detta för att visa en nedräkning som 'Billigperiod slutar om 2 timmar' (när aktiv) eller 'Nästa billigperiod slutar kl 14:00' (när inaktiv). Home Assistant visar automatiskt relativ tid för tidsstämpelsensorer."
|
"usage_tips": "UI kan visa 1,5 h medan `period_duration_minutes` = 90 för automationer."
|
||||||
|
},
|
||||||
|
"best_price_period_duration": {
|
||||||
|
"description": "Längd på nuvarande/nästa billigperiod",
|
||||||
|
"long_description": "Total längd av nuvarande eller nästa billigperiod. State visas i timmar (t.ex. 1,5 h) för enkel avläsning i UI, medan attributet `period_duration_minutes` ger samma värde i minuter (t.ex. 90) för automationer. Detta värde representerar den **fullständigt planerade längden** av perioden och är konstant under hela perioden, även när återstående tid (remaining_minutes) minskar.",
|
||||||
|
"usage_tips": "Kombinera med remaining_minutes för att beräkna när långvariga enheter ska stoppas: Perioden startade för `period_duration_minutes - remaining_minutes` minuter sedan. Detta attribut stöder energioptimeringsstrategier genom att hjälpa till med att planera högförbruksaktiviteter inom billiga perioder."
|
||||||
},
|
},
|
||||||
"best_price_remaining_minutes": {
|
"best_price_remaining_minutes": {
|
||||||
"description": "Återstående minuter i nuvarande billigperiod (0 när inaktiv)",
|
"description": "Tid kvar i nuvarande billigperiod",
|
||||||
"long_description": "Visar hur många minuter som återstår i nuvarande billigperiod. Returnerar 0 när ingen period är aktiv. Uppdateras varje minut. Kontrollera binary_sensor.best_price_period för att se om en period är aktiv.",
|
"long_description": "Visar hur mycket tid som återstår i nuvarande billigperiod. State visas i timmar (t.ex. 0,75 h) för enkel avläsning i instrumentpaneler, medan attributet `remaining_minutes` ger samma tid i minuter (t.ex. 45) för automationsvillkor. **Nedräkningstimer**: Detta värde minskar varje minut under en aktiv period. Returnerar 0 när ingen billigperiod är aktiv. Uppdateras varje minut.",
|
||||||
"usage_tips": "Perfekt för automationer: 'Om remaining_minutes > 0 OCH remaining_minutes < 30, starta tvättmaskin nu'. Värdet 0 gör det enkelt att kontrollera om en period är aktiv (värde > 0) eller inte (värde = 0)."
|
"usage_tips": "För automationer: Använd attribut `remaining_minutes` som 'Om remaining_minutes > 60, starta diskmaskin nu (tillräckligt med tid för att slutföra)' eller 'Om remaining_minutes < 15, avsluta nuvarande cykel snart'. UI visar användarvänliga timmar (t.ex. 1,25 h). Värde 0 indikerar ingen aktiv billigperiod."
|
||||||
},
|
},
|
||||||
"best_price_progress": {
|
"best_price_progress": {
|
||||||
"description": "Framsteg genom nuvarande billigperiod (0% när inaktiv)",
|
"description": "Framsteg genom nuvarande billigperiod (0% när inaktiv)",
|
||||||
"long_description": "Visar framsteg genom nuvarande billigperiod som 0-100%. Returnerar 0% när ingen period är aktiv. Uppdateras varje minut. 0% betyder period just startad, 100% betyder den snart slutar.",
|
"long_description": "Visar framsteg genom nuvarande billigperiod som 0-100%. Returnerar 0% när ingen period är aktiv. Uppdateras varje minut. 0% betyder att perioden just startade, 100% betyder att den snart slutar.",
|
||||||
"usage_tips": "Bra för visuella framstegsstaplar. Använd i automationer: 'Om progress > 0 OCH progress > 75, skicka meddelande att billigperiod snart slutar'. Värde 0 indikerar ingen aktiv period."
|
"usage_tips": "Perfekt för visuella framstegsindikatorer. Använd i automationer: 'Om progress > 0 OCH progress > 75, skicka avisering om att billigperioden snart slutar'. Värde 0 indikerar ingen aktiv period."
|
||||||
},
|
},
|
||||||
"best_price_next_start_time": {
|
"best_price_next_start_time": {
|
||||||
"description": "När nästa billigperiod startar",
|
"description": "Total längd för nuvarande eller nästa dyrperiod (state i timmar, attribut i minuter)",
|
||||||
"long_description": "Visar när nästa kommande billigperiod startar. Under en aktiv period visar detta starten av NÄSTA period efter den nuvarande. Returnerar 'Okänt' endast när inga framtida perioder är konfigurerade.",
|
"long_description": "Visar hur länge den dyra perioden varar. State använder timmar (decimal) för UI; attributet `period_duration_minutes` behåller avrundade minuter för automationer. Aktiv → varaktighet för aktuell period, annars nästa.",
|
||||||
"usage_tips": "Alltid användbart för framåtplanering: 'Nästa billigperiod startar om 3 timmar' (oavsett om du är i en period nu eller inte). Kombinera med automationer: 'När nästa starttid är om 10 minuter, skicka meddelande för att förbereda tvättmaskin'."
|
"usage_tips": "UI kan visa 0,75 h medan `period_duration_minutes` = 45 för automationer."
|
||||||
},
|
},
|
||||||
"best_price_next_in_minutes": {
|
"best_price_next_in_minutes": {
|
||||||
"description": "Minuter tills nästa billigperiod startar (0 vid övergång)",
|
"description": "Tid kvar i nuvarande dyrperiod (state i timmar, attribut i minuter)",
|
||||||
"long_description": "Visar minuter tills nästa billigperiod startar. Under en aktiv period visar detta tiden till perioden EFTER den nuvarande. Returnerar 0 under korta övergångsmoment. Uppdateras varje minut.",
|
"long_description": "Visar hur mycket tid som återstår. State använder timmar (decimal); attributet `remaining_minutes` behåller avrundade minuter för automationer. Returnerar 0 när ingen period är aktiv. Uppdateras varje minut.",
|
||||||
"usage_tips": "Perfekt för 'vänta tills billigperiod' automationer: 'Om next_in_minutes > 0 OCH next_in_minutes < 15, vänta innan diskmaskin startas'. Värde > 0 indikerar alltid att en framtida period är planerad."
|
"usage_tips": "Använd `remaining_minutes` för trösklar (t.ex. > 60) medan state är lätt att läsa i timmar."
|
||||||
},
|
},
|
||||||
"peak_price_end_time": {
|
"peak_price_end_time": {
|
||||||
"description": "När nuvarande eller nästa dyrperiod slutar",
|
"description": "Tid tills nästa dyrperiod startar (state i timmar, attribut i minuter)",
|
||||||
"long_description": "Visar sluttidsstämpeln för nuvarande dyrperiod när aktiv, eller slutet av nästa period när ingen period är aktiv. Visar alltid en användbar tidsreferens för planering. Returnerar 'Okänt' endast när inga perioder är konfigurerade.",
|
"long_description": "Visar hur länge tills nästa dyrperiod startar. State använder timmar (decimal); attributet `next_in_minutes` behåller avrundade minuter för automationer. Under en aktiv period visar detta tiden till perioden efter den aktuella. 0 under korta övergångar. Uppdateras varje minut.",
|
||||||
"usage_tips": "Använd detta för att visa 'Dyrperiod slutar om 1 timme' (när aktiv) eller 'Nästa dyrperiod slutar kl 18:00' (när inaktiv). Kombinera med automationer för att återuppta drift efter topp."
|
"usage_tips": "Använd `next_in_minutes` i automationer (t.ex. < 10) medan state är lätt att läsa i timmar."
|
||||||
|
},
|
||||||
|
"peak_price_period_duration": {
|
||||||
|
"description": "Längd på nuvarande/nästa dyrperiod",
|
||||||
|
"long_description": "Total längd av nuvarande eller nästa dyrperiod. State visas i timmar (t.ex. 1,5 h) för enkel avläsning i UI, medan attributet `period_duration_minutes` ger samma värde i minuter (t.ex. 90) för automationer. Detta värde representerar den **fullständigt planerade längden** av perioden och är konstant under hela perioden, även när återstående tid (remaining_minutes) minskar.",
|
||||||
|
"usage_tips": "Kombinera med remaining_minutes för att beräkna när långvariga enheter ska stoppas: Perioden startade för `period_duration_minutes - remaining_minutes` minuter sedan. Detta attribut stöder energibesparingsstrategier genom att hjälpa till med att planera högförbruksaktiviteter utanför dyra perioder."
|
||||||
},
|
},
|
||||||
"peak_price_remaining_minutes": {
|
"peak_price_remaining_minutes": {
|
||||||
"description": "Återstående minuter i nuvarande dyrperiod (0 när inaktiv)",
|
"description": "Tid kvar i nuvarande dyrperiod",
|
||||||
"long_description": "Visar hur många minuter som återstår i nuvarande dyrperiod. Returnerar 0 när ingen period är aktiv. Uppdateras varje minut. Kontrollera binary_sensor.peak_price_period för att se om en period är aktiv.",
|
"long_description": "Visar hur mycket tid som återstår i nuvarande dyrperiod. State visas i timmar (t.ex. 0,75 h) för enkel avläsning i instrumentpaneler, medan attributet `remaining_minutes` ger samma tid i minuter (t.ex. 45) för automationsvillkor. **Nedräkningstimer**: Detta värde minskar varje minut under en aktiv period. Returnerar 0 när ingen dyrperiod är aktiv. Uppdateras varje minut.",
|
||||||
"usage_tips": "Använd i automationer: 'Om remaining_minutes > 60, avbryt uppskjuten laddningssession'. Värde 0 gör det enkelt att skilja mellan aktiva (värde > 0) och inaktiva (värde = 0) perioder."
|
"usage_tips": "För automationer: Använd attribut `remaining_minutes` som 'Om remaining_minutes > 60, avbryt uppskjuten laddningssession' eller 'Om remaining_minutes < 15, återuppta normal drift snart'. UI visar användarvänliga timmar (t.ex. 1,0 h). Värde 0 indikerar ingen aktiv dyrperiod."
|
||||||
},
|
},
|
||||||
"peak_price_progress": {
|
"peak_price_progress": {
|
||||||
"description": "Framsteg genom nuvarande dyrperiod (0% när inaktiv)",
|
"description": "Framsteg genom nuvarande dyrperiod (0% när inaktiv)",
|
||||||
|
|
@ -360,19 +372,9 @@
|
||||||
"usage_tips": "Alltid användbart för planering: 'Nästa dyrperiod startar om 2 timmar'. Automation: 'När nästa starttid är om 30 minuter, minska värmetemperatur förebyggande'."
|
"usage_tips": "Alltid användbart för planering: 'Nästa dyrperiod startar om 2 timmar'. Automation: 'När nästa starttid är om 30 minuter, minska värmetemperatur förebyggande'."
|
||||||
},
|
},
|
||||||
"peak_price_next_in_minutes": {
|
"peak_price_next_in_minutes": {
|
||||||
"description": "Minuter tills nästa dyrperiod startar (0 vid övergång)",
|
"description": "Tid till nästa dyrperiod",
|
||||||
"long_description": "Visar minuter tills nästa dyrperiod startar. Under en aktiv period visar detta tiden till perioden EFTER den nuvarande. Returnerar 0 under korta övergångsmoment. Uppdateras varje minut.",
|
"long_description": "Visar hur länge till nästa dyrperiod. State visas i timmar (t.ex. 0,5 h) för instrumentpaneler, medan attributet `next_in_minutes` ger minuter (t.ex. 30) för automationsvillkor. Under en aktiv period visar detta tiden till perioden EFTER den nuvarande. Returnerar 0 under korta övergångsmoment. Uppdateras varje minut.",
|
||||||
"usage_tips": "Förebyggande automation: 'Om next_in_minutes > 0 OCH next_in_minutes < 10, slutför nuvarande laddcykel nu innan priserna ökar'."
|
"usage_tips": "För automationer: Använd attribut `next_in_minutes` som 'Om next_in_minutes > 0 OCH next_in_minutes < 10, slutför nuvarande laddcykel nu innan priserna ökar'. Värde > 0 indikerar alltid att en framtida dyrperiod är planerad."
|
||||||
},
|
|
||||||
"best_price_period_duration": {
|
|
||||||
"description": "Total längd på nuvarande eller nästa billigperiod i minuter",
|
|
||||||
"long_description": "Visar den totala längden på billigperioden i minuter. Under en aktiv period visar detta hela längden av nuvarande period. När ingen period är aktiv visar detta längden på nästa kommande period. Exempel: '90 minuter' för en 1,5-timmars period.",
|
|
||||||
"usage_tips": "Kombinera med remaining_minutes för att planera uppgifter: 'Om duration = 120 OCH remaining_minutes > 90, starta tvättmaskin (tillräckligt med tid för att slutföra)'. Användbart för att förstå om perioder är tillräckligt långa för energikrävande uppgifter."
|
|
||||||
},
|
|
||||||
"peak_price_period_duration": {
|
|
||||||
"description": "Total längd på nuvarande eller nästa dyrperiod i minuter",
|
|
||||||
"long_description": "Visar den totala längden på dyrperioden i minuter. Under en aktiv period visar detta hela längden av nuvarande period. När ingen period är aktiv visar detta längden på nästa kommande period. Exempel: '60 minuter' för en 1-timmars period.",
|
|
||||||
"usage_tips": "Använd för att planera energisparåtgärder: 'Om duration > 120, minska värmetemperatur mer aggressivt (lång dyr period)'. Hjälper till att bedöma hur mycket energiförbrukning måste minskas."
|
|
||||||
},
|
},
|
||||||
"home_type": {
|
"home_type": {
|
||||||
"description": "Bostadstyp (lägenhet, hus osv.)",
|
"description": "Bostadstyp (lägenhet, hus osv.)",
|
||||||
|
|
@ -487,6 +489,80 @@
|
||||||
"usage_tips": "Använd detta för att verifiera att realtidsförbrukningen är tillgänglig. Aktivera meddelanden om detta oväntat ändras till 'av', vilket indikerar potentiella hårdvaru- eller anslutningsproblem."
|
"usage_tips": "Använd detta för att verifiera att realtidsförbrukningen är tillgänglig. Aktivera meddelanden om detta oväntat ändras till 'av', vilket indikerar potentiella hårdvaru- eller anslutningsproblem."
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"number": {
|
||||||
|
"best_price_flex_override": {
|
||||||
|
"description": "Maximal procent över daglig minimumpris som intervaller kan ha och fortfarande kvalificera som 'bästa pris'. Rekommenderas: 15-20 med lättnad aktiverad (standard), eller 25-35 utan lättnad. Maximum: 50 (hårt tak för tillförlitlig perioddetektering).",
|
||||||
|
"long_description": "När denna entitet är aktiverad överskriver värdet 'Flexibilitet'-inställningen från alternativ-dialogen för bästa pris-periodberäkningar.",
|
||||||
|
"usage_tips": "Aktivera denna entitet för att dynamiskt justera bästa pris-detektering via automatiseringar, t.ex. högre flexibilitet för kritiska laster eller striktare krav för flexibla apparater."
|
||||||
|
},
|
||||||
|
"best_price_min_distance_override": {
|
||||||
|
"description": "Minsta procentuella avstånd under dagligt genomsnitt. Intervaller måste vara så långt under genomsnittet för att kvalificera som 'bästa pris'. Hjälper att skilja äkta lågprisperioder från genomsnittspriser.",
|
||||||
|
"long_description": "När denna entitet är aktiverad överskriver värdet 'Minimiavstånd'-inställningen från alternativ-dialogen för bästa pris-periodberäkningar.",
|
||||||
|
"usage_tips": "Öka värdet för striktare bästa pris-kriterier. Minska om för få perioder detekteras."
|
||||||
|
},
|
||||||
|
"best_price_min_period_length_override": {
|
||||||
|
"description": "Minsta periodlängd i 15-minuters intervaller. Perioder kortare än detta rapporteras inte. Exempel: 2 = minst 30 minuter.",
|
||||||
|
"long_description": "När denna entitet är aktiverad överskriver värdet 'Minsta periodlängd'-inställningen från alternativ-dialogen för bästa pris-periodberäkningar.",
|
||||||
|
"usage_tips": "Anpassa till typisk apparatkörtid: 2 (30 min) för snabbprogram, 4-8 (1-2 timmar) för normala cykler, 8+ för långa ECO-program."
|
||||||
|
},
|
||||||
|
"best_price_min_periods_override": {
|
||||||
|
"description": "Minsta antal bästa pris-perioder att hitta dagligen. När lättnad är aktiverad kommer systemet automatiskt att justera kriterierna för att uppnå detta antal.",
|
||||||
|
"long_description": "När denna entitet är aktiverad överskriver värdet 'Minsta antal perioder'-inställningen från alternativ-dialogen för bästa pris-periodberäkningar.",
|
||||||
|
"usage_tips": "Ställ in detta på antalet tidskritiska uppgifter du har dagligen. Exempel: 2 för två tvättmaskinskörningar."
|
||||||
|
},
|
||||||
|
"best_price_relaxation_attempts_override": {
|
||||||
|
"description": "Antal försök att gradvis lätta på kriterierna för att uppnå minsta periodantal. Varje försök ökar flexibiliteten med 3 procent. Vid 0 används endast baskriterier.",
|
||||||
|
"long_description": "När denna entitet är aktiverad överskriver värdet 'Lättnadsförsök'-inställningen från alternativ-dialogen för bästa pris-periodberäkningar.",
|
||||||
|
"usage_tips": "Högre värden gör perioddetektering mer adaptiv för dagar med stabila priser. Ställ in på 0 för att tvinga strikta kriterier utan lättnad."
|
||||||
|
},
|
||||||
|
"best_price_gap_count_override": {
|
||||||
|
"description": "Maximalt antal dyrare intervaller som kan tillåtas mellan billiga intervaller medan de fortfarande räknas som en sammanhängande period. Vid 0 måste billiga intervaller vara påföljande.",
|
||||||
|
"long_description": "När denna entitet är aktiverad överskriver värdet 'Glapptolerans'-inställningen från alternativ-dialogen för bästa pris-periodberäkningar.",
|
||||||
|
"usage_tips": "Öka detta för apparater med variabel last (t.ex. värmepumpar) som kan tolerera korta dyrare intervaller. Ställ in på 0 för kontinuerligt billiga perioder."
|
||||||
|
},
|
||||||
|
"peak_price_flex_override": {
|
||||||
|
"description": "Maximal procent under daglig maximumpris som intervaller kan ha och fortfarande kvalificera som 'topppris'. Samma rekommendationer som för bästa pris-flexibilitet.",
|
||||||
|
"long_description": "När denna entitet är aktiverad överskriver värdet 'Flexibilitet'-inställningen från alternativ-dialogen för topppris-periodberäkningar.",
|
||||||
|
"usage_tips": "Använd detta för att justera topppris-tröskeln vid körtid för automatiseringar som undviker förbrukning under dyra timmar."
|
||||||
|
},
|
||||||
|
"peak_price_min_distance_override": {
|
||||||
|
"description": "Minsta procentuella avstånd över dagligt genomsnitt. Intervaller måste vara så långt över genomsnittet för att kvalificera som 'topppris'.",
|
||||||
|
"long_description": "När denna entitet är aktiverad överskriver värdet 'Minimiavstånd'-inställningen från alternativ-dialogen för topppris-periodberäkningar.",
|
||||||
|
"usage_tips": "Öka värdet för att endast fånga extrema pristoppar. Minska för att inkludera fler högpristider."
|
||||||
|
},
|
||||||
|
"peak_price_min_period_length_override": {
|
||||||
|
"description": "Minsta periodlängd i 15-minuters intervaller för topppriser. Kortare pristoppar rapporteras inte som perioder.",
|
||||||
|
"long_description": "När denna entitet är aktiverad överskriver värdet 'Minsta periodlängd'-inställningen från alternativ-dialogen för topppris-periodberäkningar.",
|
||||||
|
"usage_tips": "Kortare värden fångar korta pristoppar. Längre värden fokuserar på ihållande högprisperioder."
|
||||||
|
},
|
||||||
|
"peak_price_min_periods_override": {
|
||||||
|
"description": "Minsta antal topppris-perioder att hitta dagligen.",
|
||||||
|
"long_description": "När denna entitet är aktiverad överskriver värdet 'Minsta antal perioder'-inställningen från alternativ-dialogen för topppris-periodberäkningar.",
|
||||||
|
"usage_tips": "Ställ in detta baserat på hur många högprisperioder du vill fånga per dag för automatiseringar."
|
||||||
|
},
|
||||||
|
"peak_price_relaxation_attempts_override": {
|
||||||
|
"description": "Antal försök att lätta på kriterierna för att uppnå minsta antal topppris-perioder.",
|
||||||
|
"long_description": "När denna entitet är aktiverad överskriver värdet 'Lättnadsförsök'-inställningen från alternativ-dialogen för topppris-periodberäkningar.",
|
||||||
|
"usage_tips": "Öka detta om inga perioder hittas på dagar med stabila priser. Ställ in på 0 för att tvinga strikta kriterier."
|
||||||
|
},
|
||||||
|
"peak_price_gap_count_override": {
|
||||||
|
"description": "Maximalt antal billigare intervaller som kan tillåtas mellan dyra intervaller medan de fortfarande räknas som en topppris-period.",
|
||||||
|
"long_description": "När denna entitet är aktiverad överskriver värdet 'Glapptolerans'-inställningen från alternativ-dialogen för topppris-periodberäkningar.",
|
||||||
|
"usage_tips": "Högre värden fångar längre högprisperioder även med korta prisdipp. Ställ in på 0 för strikt sammanhängande topppriser."
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"switch": {
|
||||||
|
"best_price_enable_relaxation_override": {
|
||||||
|
"description": "När aktiverad lättas kriterierna automatiskt för att uppnå minsta periodantal. När inaktiverad rapporteras endast perioder som uppfyller strikta kriterier (möjligen noll perioder på dagar med stabila priser).",
|
||||||
|
"long_description": "När denna entitet är aktiverad överskriver värdet 'Uppnå minimiantal'-inställningen från alternativ-dialogen för bästa pris-periodberäkningar.",
|
||||||
|
"usage_tips": "Aktivera detta för garanterade dagliga automatiseringsmöjligheter. Inaktivera om du endast vill ha riktigt billiga perioder, även om det innebär inga perioder vissa dagar."
|
||||||
|
},
|
||||||
|
"peak_price_enable_relaxation_override": {
|
||||||
|
"description": "När aktiverad lättas kriterierna automatiskt för att uppnå minsta periodantal. När inaktiverad rapporteras endast äkta pristoppar.",
|
||||||
|
"long_description": "När denna entitet är aktiverad överskriver värdet 'Uppnå minimiantal'-inställningen från alternativ-dialogen för topppris-periodberäkningar.",
|
||||||
|
"usage_tips": "Aktivera detta för konsekventa topppris-varningar. Inaktivera för att endast fånga extrema pristoppar."
|
||||||
|
}
|
||||||
|
},
|
||||||
"home_types": {
|
"home_types": {
|
||||||
"APARTMENT": "Lägenhet",
|
"APARTMENT": "Lägenhet",
|
||||||
"ROWHOUSE": "Radhus",
|
"ROWHOUSE": "Radhus",
|
||||||
|
|
|
||||||
|
|
@ -70,7 +70,7 @@ async def async_get_config_entry_diagnostics(
|
||||||
},
|
},
|
||||||
"cache_status": {
|
"cache_status": {
|
||||||
"user_data_cached": coordinator._cached_user_data is not None, # noqa: SLF001
|
"user_data_cached": coordinator._cached_user_data is not None, # noqa: SLF001
|
||||||
"price_data_cached": coordinator._cached_price_data is not None, # noqa: SLF001
|
"has_price_data": coordinator.data is not None and "priceInfo" in (coordinator.data or {}),
|
||||||
"transformer_cache_valid": coordinator._data_transformer._cached_transformed_data is not None, # noqa: SLF001
|
"transformer_cache_valid": coordinator._data_transformer._cached_transformed_data is not None, # noqa: SLF001
|
||||||
"period_calculator_cache_valid": coordinator._period_calculator._cached_periods is not None, # noqa: SLF001
|
"period_calculator_cache_valid": coordinator._period_calculator._cached_periods is not None, # noqa: SLF001
|
||||||
},
|
},
|
||||||
|
|
|
||||||
|
|
@ -85,19 +85,25 @@ def get_dynamic_icon(
|
||||||
|
|
||||||
|
|
||||||
def get_trend_icon(key: str, value: Any) -> str | None:
|
def get_trend_icon(key: str, value: Any) -> str | None:
|
||||||
"""Get icon for trend sensors."""
|
"""Get icon for trend sensors using 5-level trend scale."""
|
||||||
# Handle next_price_trend_change TIMESTAMP sensor differently
|
# Handle next_price_trend_change TIMESTAMP sensor differently
|
||||||
# (icon based on attributes, not value which is a timestamp)
|
# (icon based on attributes, not value which is a timestamp)
|
||||||
if key == "next_price_trend_change":
|
if key == "next_price_trend_change":
|
||||||
return None # Will be handled by sensor's icon property using attributes
|
return None # Will be handled by sensor's icon property using attributes
|
||||||
|
|
||||||
if not key.startswith("price_trend_") or not isinstance(value, str):
|
if not key.startswith("price_trend_") and key != "current_price_trend":
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
if not isinstance(value, str):
|
||||||
|
return None
|
||||||
|
|
||||||
|
# 5-level trend icons: strongly uses double arrows, normal uses single
|
||||||
trend_icons = {
|
trend_icons = {
|
||||||
"rising": "mdi:trending-up",
|
"strongly_rising": "mdi:chevron-double-up", # Strong upward movement
|
||||||
"falling": "mdi:trending-down",
|
"rising": "mdi:trending-up", # Normal upward trend
|
||||||
"stable": "mdi:trending-neutral",
|
"stable": "mdi:trending-neutral", # No significant change
|
||||||
|
"falling": "mdi:trending-down", # Normal downward trend
|
||||||
|
"strongly_falling": "mdi:chevron-double-down", # Strong downward movement
|
||||||
}
|
}
|
||||||
return trend_icons.get(value)
|
return trend_icons.get(value)
|
||||||
|
|
||||||
|
|
@ -197,7 +203,7 @@ def get_price_sensor_icon(
|
||||||
return None
|
return None
|
||||||
|
|
||||||
# Only current price sensors get dynamic icons
|
# Only current price sensors get dynamic icons
|
||||||
if key == "current_interval_price":
|
if key in ("current_interval_price", "current_interval_price_base"):
|
||||||
level = get_price_level_for_icon(coordinator_data, interval_offset=0, time=time)
|
level = get_price_level_for_icon(coordinator_data, interval_offset=0, time=time)
|
||||||
if level:
|
if level:
|
||||||
return PRICE_LEVEL_CASH_ICON_MAPPING.get(level.upper())
|
return PRICE_LEVEL_CASH_ICON_MAPPING.get(level.upper())
|
||||||
|
|
|
||||||
|
|
@ -16,7 +16,15 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"get_apexcharts_yaml": {
|
"get_apexcharts_yaml": {
|
||||||
"service": "mdi:chart-line"
|
"service": "mdi:chart-line",
|
||||||
|
"sections": {
|
||||||
|
"entry_id": "mdi:identifier",
|
||||||
|
"day": "mdi:calendar-range",
|
||||||
|
"level_type": "mdi:format-list-bulleted-type",
|
||||||
|
"resolution": "mdi:timer-sand",
|
||||||
|
"highlight_best_price": "mdi:battery-charging-low",
|
||||||
|
"highlight_peak_price": "mdi:battery-alert"
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"refresh_user_data": {
|
"refresh_user_data": {
|
||||||
"service": "mdi:refresh"
|
"service": "mdi:refresh"
|
||||||
|
|
|
||||||
|
|
@ -4,10 +4,15 @@ from __future__ import annotations
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
from datetime import datetime, timedelta
|
from datetime import datetime, timedelta
|
||||||
from typing import Any
|
from typing import TYPE_CHECKING, Any
|
||||||
|
|
||||||
from homeassistant.util import dt as dt_utils
|
from homeassistant.util import dt as dt_utils
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from custom_components.tibber_prices.coordinator.time_service import (
|
||||||
|
TibberPricesTimeService,
|
||||||
|
)
|
||||||
|
|
||||||
_LOGGER = logging.getLogger(__name__)
|
_LOGGER = logging.getLogger(__name__)
|
||||||
_LOGGER_DETAILS = logging.getLogger(__name__ + ".details")
|
_LOGGER_DETAILS = logging.getLogger(__name__ + ".details")
|
||||||
|
|
||||||
|
|
@ -37,9 +42,10 @@ class TibberPricesIntervalPoolFetchGroupCache:
|
||||||
Protected: 2025-11-23 00:00 to 2025-11-27 00:00
|
Protected: 2025-11-23 00:00 to 2025-11-27 00:00
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self) -> None:
|
def __init__(self, *, time_service: TibberPricesTimeService | None = None) -> None:
|
||||||
"""Initialize empty fetch group cache."""
|
"""Initialize empty fetch group cache with optional TimeService."""
|
||||||
self._fetch_groups: list[dict[str, Any]] = []
|
self._fetch_groups: list[dict[str, Any]] = []
|
||||||
|
self._time_service = time_service
|
||||||
|
|
||||||
# Protected range cache (invalidated daily)
|
# Protected range cache (invalidated daily)
|
||||||
self._protected_range_cache: tuple[str, str] | None = None
|
self._protected_range_cache: tuple[str, str] | None = None
|
||||||
|
|
@ -93,6 +99,11 @@ class TibberPricesIntervalPoolFetchGroupCache:
|
||||||
Protected range: day-before-yesterday 00:00 to day-after-tomorrow 00:00.
|
Protected range: day-before-yesterday 00:00 to day-after-tomorrow 00:00.
|
||||||
This range shifts daily automatically.
|
This range shifts daily automatically.
|
||||||
|
|
||||||
|
Time Machine Support:
|
||||||
|
If time_service was provided at init, uses time_service.now() for
|
||||||
|
"today" calculation. This protects the correct date range when
|
||||||
|
simulating a different date.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Tuple of (start_iso, end_iso) for protected range.
|
Tuple of (start_iso, end_iso) for protected range.
|
||||||
Start is inclusive, end is exclusive.
|
Start is inclusive, end is exclusive.
|
||||||
|
|
@ -102,10 +113,11 @@ class TibberPricesIntervalPoolFetchGroupCache:
|
||||||
Protected days: 2025-11-23, 2025-11-24, 2025-11-25, 2025-11-26
|
Protected days: 2025-11-23, 2025-11-24, 2025-11-25, 2025-11-26
|
||||||
|
|
||||||
"""
|
"""
|
||||||
# Check cache validity (invalidate daily)
|
# Use TimeService if available (Time Machine support), else real time
|
||||||
now = dt_utils.now()
|
now = self._time_service.now() if self._time_service else dt_utils.now()
|
||||||
today_date_str = now.date().isoformat()
|
today_date_str = now.date().isoformat()
|
||||||
|
|
||||||
|
# Check cache validity (invalidate daily)
|
||||||
if self._protected_range_cache_date == today_date_str and self._protected_range_cache:
|
if self._protected_range_cache_date == today_date_str and self._protected_range_cache:
|
||||||
return self._protected_range_cache
|
return self._protected_range_cache
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
"""Interval fetcher - gap detection and API coordination for interval pool."""
|
"""Interval fetcher - coverage check and API coordination for interval pool."""
|
||||||
|
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
|
|
@ -38,7 +38,7 @@ TIME_TOLERANCE_MINUTES = 1
|
||||||
|
|
||||||
|
|
||||||
class TibberPricesIntervalPoolFetcher:
|
class TibberPricesIntervalPoolFetcher:
|
||||||
"""Fetch missing intervals from API based on gap detection."""
|
"""Fetch missing intervals from API based on coverage check."""
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
|
|
@ -62,14 +62,14 @@ class TibberPricesIntervalPoolFetcher:
|
||||||
self._index = index
|
self._index = index
|
||||||
self._home_id = home_id
|
self._home_id = home_id
|
||||||
|
|
||||||
def detect_gaps(
|
def check_coverage(
|
||||||
self,
|
self,
|
||||||
cached_intervals: list[dict[str, Any]],
|
cached_intervals: list[dict[str, Any]],
|
||||||
start_time_iso: str,
|
start_time_iso: str,
|
||||||
end_time_iso: str,
|
end_time_iso: str,
|
||||||
) -> list[tuple[str, str]]:
|
) -> list[tuple[str, str]]:
|
||||||
"""
|
"""
|
||||||
Detect missing time ranges that need to be fetched.
|
Check cache coverage and find missing time ranges.
|
||||||
|
|
||||||
This method minimizes API calls by:
|
This method minimizes API calls by:
|
||||||
1. Finding all gaps in cached intervals
|
1. Finding all gaps in cached intervals
|
||||||
|
|
@ -130,7 +130,7 @@ class TibberPricesIntervalPoolFetcher:
|
||||||
if time_diff_before_first > TIME_TOLERANCE_SECONDS:
|
if time_diff_before_first > TIME_TOLERANCE_SECONDS:
|
||||||
missing_ranges.append((start_time_iso, sorted_intervals[0]["startsAt"]))
|
missing_ranges.append((start_time_iso, sorted_intervals[0]["startsAt"]))
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"Gap before first cached interval: %s to %s (%.1f seconds)",
|
"Missing range before first cached interval: %s to %s (%.1f seconds)",
|
||||||
start_time_iso,
|
start_time_iso,
|
||||||
sorted_intervals[0]["startsAt"],
|
sorted_intervals[0]["startsAt"],
|
||||||
time_diff_before_first,
|
time_diff_before_first,
|
||||||
|
|
@ -163,7 +163,7 @@ class TibberPricesIntervalPoolFetcher:
|
||||||
current_interval_end = current_dt + timedelta(minutes=expected_interval_minutes)
|
current_interval_end = current_dt + timedelta(minutes=expected_interval_minutes)
|
||||||
missing_ranges.append((current_interval_end.isoformat(), next_start))
|
missing_ranges.append((current_interval_end.isoformat(), next_start))
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"Gap between cached intervals: %s (ends at %s) to %s (%.1f min gap, expected %d min)",
|
"Missing range between cached intervals: %s (ends at %s) to %s (%.1f min, expected %d min)",
|
||||||
current_start,
|
current_start,
|
||||||
current_interval_end.isoformat(),
|
current_interval_end.isoformat(),
|
||||||
next_start,
|
next_start,
|
||||||
|
|
@ -190,7 +190,7 @@ class TibberPricesIntervalPoolFetcher:
|
||||||
# Missing range starts AFTER the last cached interval ends
|
# Missing range starts AFTER the last cached interval ends
|
||||||
missing_ranges.append((last_interval_end_dt.isoformat(), end_time_iso))
|
missing_ranges.append((last_interval_end_dt.isoformat(), end_time_iso))
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"Gap after last cached interval: %s (ends at %s) to %s (%.1f seconds, need >= %d)",
|
"Missing range after last cached interval: %s (ends at %s) to %s (%.1f seconds, need >= %d)",
|
||||||
sorted_intervals[-1]["startsAt"],
|
sorted_intervals[-1]["startsAt"],
|
||||||
last_interval_end_dt.isoformat(),
|
last_interval_end_dt.isoformat(),
|
||||||
end_time_iso,
|
end_time_iso,
|
||||||
|
|
@ -200,7 +200,7 @@ class TibberPricesIntervalPoolFetcher:
|
||||||
|
|
||||||
if not missing_ranges:
|
if not missing_ranges:
|
||||||
_LOGGER.debug(
|
_LOGGER.debug(
|
||||||
"No gaps detected - all intervals cached for range %s to %s",
|
"Full coverage - all intervals cached for range %s to %s",
|
||||||
start_time_iso,
|
start_time_iso,
|
||||||
end_time_iso,
|
end_time_iso,
|
||||||
)
|
)
|
||||||
|
|
@ -285,7 +285,7 @@ class TibberPricesIntervalPoolFetcher:
|
||||||
|
|
||||||
for idx, (missing_start_iso, missing_end_iso) in enumerate(missing_ranges, start=1):
|
for idx, (missing_start_iso, missing_end_iso) in enumerate(missing_ranges, start=1):
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"API call %d/%d for home %s: fetching range %s to %s",
|
"Fetching from Tibber API (%d/%d) for home %s: range %s to %s",
|
||||||
idx,
|
idx,
|
||||||
len(missing_ranges),
|
len(missing_ranges),
|
||||||
self._home_id,
|
self._home_id,
|
||||||
|
|
@ -309,10 +309,9 @@ class TibberPricesIntervalPoolFetcher:
|
||||||
all_fetched_intervals.append(fetched_intervals)
|
all_fetched_intervals.append(fetched_intervals)
|
||||||
|
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"Fetched %d intervals from API for home %s (fetch time: %s)",
|
"Received %d intervals from Tibber API for home %s",
|
||||||
len(fetched_intervals),
|
len(fetched_intervals),
|
||||||
self._home_id,
|
self._home_id,
|
||||||
fetch_time_iso,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
# Notify callback if provided (for immediate caching)
|
# Notify callback if provided (for immediate caching)
|
||||||
|
|
|
||||||
|
|
@ -3,6 +3,7 @@
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
|
from datetime import datetime
|
||||||
from typing import TYPE_CHECKING, Any
|
from typing import TYPE_CHECKING, Any
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
|
|
@ -17,6 +18,13 @@ _LOGGER_DETAILS = logging.getLogger(__name__ + ".details")
|
||||||
MAX_CACHE_SIZE = 960
|
MAX_CACHE_SIZE = 960
|
||||||
|
|
||||||
|
|
||||||
|
def _normalize_starts_at(starts_at: datetime | str) -> str:
|
||||||
|
"""Normalize startsAt to consistent format (YYYY-MM-DDTHH:MM:SS)."""
|
||||||
|
if isinstance(starts_at, datetime):
|
||||||
|
return starts_at.strftime("%Y-%m-%dT%H:%M:%S")
|
||||||
|
return starts_at[:19]
|
||||||
|
|
||||||
|
|
||||||
class TibberPricesIntervalPoolGarbageCollector:
|
class TibberPricesIntervalPoolGarbageCollector:
|
||||||
"""
|
"""
|
||||||
Manages cache eviction and dead interval cleanup.
|
Manages cache eviction and dead interval cleanup.
|
||||||
|
|
@ -77,6 +85,15 @@ class TibberPricesIntervalPoolGarbageCollector:
|
||||||
self._home_id,
|
self._home_id,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Phase 1.5: Remove empty fetch groups (after dead interval cleanup)
|
||||||
|
empty_removed = self._remove_empty_groups(fetch_groups)
|
||||||
|
if empty_removed > 0:
|
||||||
|
_LOGGER_DETAILS.debug(
|
||||||
|
"GC removed %d empty fetch groups (home %s)",
|
||||||
|
empty_removed,
|
||||||
|
self._home_id,
|
||||||
|
)
|
||||||
|
|
||||||
# Phase 2: Count total intervals after cleanup
|
# Phase 2: Count total intervals after cleanup
|
||||||
total_intervals = self._cache.count_total_intervals()
|
total_intervals = self._cache.count_total_intervals()
|
||||||
|
|
||||||
|
|
@ -94,7 +111,7 @@ class TibberPricesIntervalPoolGarbageCollector:
|
||||||
|
|
||||||
if not evicted_indices:
|
if not evicted_indices:
|
||||||
# All intervals are protected, cannot evict
|
# All intervals are protected, cannot evict
|
||||||
return dead_count > 0
|
return dead_count > 0 or empty_removed > 0
|
||||||
|
|
||||||
# Phase 4: Rebuild cache and index
|
# Phase 4: Rebuild cache and index
|
||||||
new_fetch_groups = [group for idx, group in enumerate(fetch_groups) if idx not in evicted_indices]
|
new_fetch_groups = [group for idx, group in enumerate(fetch_groups) if idx not in evicted_indices]
|
||||||
|
|
@ -110,6 +127,35 @@ class TibberPricesIntervalPoolGarbageCollector:
|
||||||
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
def _remove_empty_groups(self, fetch_groups: list[dict[str, Any]]) -> int:
|
||||||
|
"""
|
||||||
|
Remove fetch groups with no intervals.
|
||||||
|
|
||||||
|
After dead interval cleanup, some groups may be completely empty.
|
||||||
|
These should be removed to prevent memory accumulation.
|
||||||
|
|
||||||
|
Note: This modifies the cache's internal list in-place and rebuilds
|
||||||
|
the index to maintain consistency.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
fetch_groups: List of fetch groups (will be modified).
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Number of empty groups removed.
|
||||||
|
|
||||||
|
"""
|
||||||
|
# Find non-empty groups
|
||||||
|
non_empty_groups = [group for group in fetch_groups if group["intervals"]]
|
||||||
|
removed_count = len(fetch_groups) - len(non_empty_groups)
|
||||||
|
|
||||||
|
if removed_count > 0:
|
||||||
|
# Update cache with filtered list
|
||||||
|
self._cache.set_fetch_groups(non_empty_groups)
|
||||||
|
# Rebuild index since group indices changed
|
||||||
|
self._index.rebuild(non_empty_groups)
|
||||||
|
|
||||||
|
return removed_count
|
||||||
|
|
||||||
def _cleanup_dead_intervals(self, fetch_groups: list[dict[str, Any]]) -> int:
|
def _cleanup_dead_intervals(self, fetch_groups: list[dict[str, Any]]) -> int:
|
||||||
"""
|
"""
|
||||||
Remove dead intervals from all fetch groups.
|
Remove dead intervals from all fetch groups.
|
||||||
|
|
@ -135,7 +181,7 @@ class TibberPricesIntervalPoolGarbageCollector:
|
||||||
living_intervals = []
|
living_intervals = []
|
||||||
|
|
||||||
for interval_idx, interval in enumerate(old_intervals):
|
for interval_idx, interval in enumerate(old_intervals):
|
||||||
starts_at_normalized = interval["startsAt"][:19]
|
starts_at_normalized = _normalize_starts_at(interval["startsAt"])
|
||||||
index_entry = self._index.get(starts_at_normalized)
|
index_entry = self._index.get(starts_at_normalized)
|
||||||
|
|
||||||
if index_entry is not None:
|
if index_entry is not None:
|
||||||
|
|
|
||||||
|
|
@ -93,6 +93,28 @@ class TibberPricesIntervalPoolTimestampIndex:
|
||||||
starts_at_normalized = self._normalize_timestamp(timestamp)
|
starts_at_normalized = self._normalize_timestamp(timestamp)
|
||||||
self._index.pop(starts_at_normalized, None)
|
self._index.pop(starts_at_normalized, None)
|
||||||
|
|
||||||
|
def update_batch(
|
||||||
|
self,
|
||||||
|
updates: list[tuple[str, int, int]],
|
||||||
|
) -> None:
|
||||||
|
"""
|
||||||
|
Update multiple index entries efficiently in a single operation.
|
||||||
|
|
||||||
|
More efficient than calling remove() + add() for each entry,
|
||||||
|
as it avoids repeated dict operations and normalization.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
updates: List of (timestamp, fetch_group_index, interval_index) tuples.
|
||||||
|
Timestamps will be normalized automatically.
|
||||||
|
|
||||||
|
"""
|
||||||
|
for timestamp, fetch_group_index, interval_index in updates:
|
||||||
|
starts_at_normalized = self._normalize_timestamp(timestamp)
|
||||||
|
self._index[starts_at_normalized] = {
|
||||||
|
"fetch_group_index": fetch_group_index,
|
||||||
|
"interval_index": interval_index,
|
||||||
|
}
|
||||||
|
|
||||||
def clear(self) -> None:
|
def clear(self) -> None:
|
||||||
"""Clear entire index."""
|
"""Clear entire index."""
|
||||||
self._index.clear()
|
self._index.clear()
|
||||||
|
|
|
||||||
|
|
@ -3,21 +3,26 @@
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
import asyncio
|
import asyncio
|
||||||
|
import contextlib
|
||||||
import logging
|
import logging
|
||||||
from datetime import datetime, timedelta
|
from datetime import datetime, timedelta
|
||||||
from typing import TYPE_CHECKING, Any
|
from typing import TYPE_CHECKING, Any
|
||||||
|
from zoneinfo import ZoneInfo
|
||||||
|
|
||||||
from custom_components.tibber_prices.api.exceptions import TibberPricesApiClientError
|
from custom_components.tibber_prices.api.exceptions import TibberPricesApiClientError
|
||||||
from homeassistant.util import dt as dt_utils
|
from homeassistant.util import dt as dt_utils
|
||||||
|
|
||||||
from .cache import TibberPricesIntervalPoolFetchGroupCache
|
from .cache import TibberPricesIntervalPoolFetchGroupCache
|
||||||
from .fetcher import TibberPricesIntervalPoolFetcher
|
from .fetcher import TibberPricesIntervalPoolFetcher
|
||||||
from .garbage_collector import TibberPricesIntervalPoolGarbageCollector
|
from .garbage_collector import MAX_CACHE_SIZE, TibberPricesIntervalPoolGarbageCollector
|
||||||
from .index import TibberPricesIntervalPoolTimestampIndex
|
from .index import TibberPricesIntervalPoolTimestampIndex
|
||||||
from .storage import async_save_pool_state
|
from .storage import async_save_pool_state
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from custom_components.tibber_prices.api.client import TibberPricesApiClient
|
from custom_components.tibber_prices.api.client import TibberPricesApiClient
|
||||||
|
from custom_components.tibber_prices.coordinator.time_service import (
|
||||||
|
TibberPricesTimeService,
|
||||||
|
)
|
||||||
|
|
||||||
_LOGGER = logging.getLogger(__name__)
|
_LOGGER = logging.getLogger(__name__)
|
||||||
_LOGGER_DETAILS = logging.getLogger(__name__ + ".details")
|
_LOGGER_DETAILS = logging.getLogger(__name__ + ".details")
|
||||||
|
|
@ -30,6 +35,13 @@ INTERVAL_QUARTER_HOURLY = 15
|
||||||
DEBOUNCE_DELAY_SECONDS = 3.0
|
DEBOUNCE_DELAY_SECONDS = 3.0
|
||||||
|
|
||||||
|
|
||||||
|
def _normalize_starts_at(starts_at: datetime | str) -> str:
|
||||||
|
"""Normalize startsAt to consistent format (YYYY-MM-DDTHH:MM:SS)."""
|
||||||
|
if isinstance(starts_at, datetime):
|
||||||
|
return starts_at.strftime("%Y-%m-%dT%H:%M:%S")
|
||||||
|
return starts_at[:19]
|
||||||
|
|
||||||
|
|
||||||
class TibberPricesIntervalPool:
|
class TibberPricesIntervalPool:
|
||||||
"""
|
"""
|
||||||
High-performance interval cache manager for a single Tibber home.
|
High-performance interval cache manager for a single Tibber home.
|
||||||
|
|
@ -70,6 +82,7 @@ class TibberPricesIntervalPool:
|
||||||
api: TibberPricesApiClient,
|
api: TibberPricesApiClient,
|
||||||
hass: Any | None = None,
|
hass: Any | None = None,
|
||||||
entry_id: str | None = None,
|
entry_id: str | None = None,
|
||||||
|
time_service: TibberPricesTimeService | None = None,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""
|
"""
|
||||||
Initialize interval pool manager.
|
Initialize interval pool manager.
|
||||||
|
|
@ -79,12 +92,15 @@ class TibberPricesIntervalPool:
|
||||||
api: API client for fetching intervals.
|
api: API client for fetching intervals.
|
||||||
hass: HomeAssistant instance for auto-save (optional).
|
hass: HomeAssistant instance for auto-save (optional).
|
||||||
entry_id: Config entry ID for auto-save (optional).
|
entry_id: Config entry ID for auto-save (optional).
|
||||||
|
time_service: TimeService for time-travel support (optional).
|
||||||
|
If None, uses real time (dt_utils.now()).
|
||||||
|
|
||||||
"""
|
"""
|
||||||
self._home_id = home_id
|
self._home_id = home_id
|
||||||
|
self._time_service = time_service
|
||||||
|
|
||||||
# Initialize components with dependency injection
|
# Initialize components with dependency injection
|
||||||
self._cache = TibberPricesIntervalPoolFetchGroupCache()
|
self._cache = TibberPricesIntervalPoolFetchGroupCache(time_service=time_service)
|
||||||
self._index = TibberPricesIntervalPoolTimestampIndex()
|
self._index = TibberPricesIntervalPoolTimestampIndex()
|
||||||
self._gc = TibberPricesIntervalPoolGarbageCollector(self._cache, self._index, home_id)
|
self._gc = TibberPricesIntervalPoolGarbageCollector(self._cache, self._index, home_id)
|
||||||
self._fetcher = TibberPricesIntervalPoolFetcher(api, self._cache, self._index, home_id)
|
self._fetcher = TibberPricesIntervalPoolFetcher(api, self._cache, self._index, home_id)
|
||||||
|
|
@ -102,7 +118,7 @@ class TibberPricesIntervalPool:
|
||||||
user_data: dict[str, Any],
|
user_data: dict[str, Any],
|
||||||
start_time: datetime,
|
start_time: datetime,
|
||||||
end_time: datetime,
|
end_time: datetime,
|
||||||
) -> list[dict[str, Any]]:
|
) -> tuple[list[dict[str, Any]], bool]:
|
||||||
"""
|
"""
|
||||||
Get price intervals for time range (cached + fetch missing).
|
Get price intervals for time range (cached + fetch missing).
|
||||||
|
|
||||||
|
|
@ -123,8 +139,10 @@ class TibberPricesIntervalPool:
|
||||||
end_time: End of range (exclusive, timezone-aware).
|
end_time: End of range (exclusive, timezone-aware).
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
List of price interval dicts, sorted by startsAt.
|
Tuple of (intervals, api_called):
|
||||||
Contains ALL intervals in requested range (cached + fetched).
|
- intervals: List of price interval dicts, sorted by startsAt.
|
||||||
|
Contains ALL intervals in requested range (cached + fetched).
|
||||||
|
- api_called: True if API was called to fetch missing data, False if all from cache.
|
||||||
|
|
||||||
Raises:
|
Raises:
|
||||||
TibberPricesApiClientError: If API calls fail or validation errors.
|
TibberPricesApiClientError: If API calls fail or validation errors.
|
||||||
|
|
@ -153,19 +171,18 @@ class TibberPricesIntervalPool:
|
||||||
# Get cached intervals using index
|
# Get cached intervals using index
|
||||||
cached_intervals = self._get_cached_intervals(start_time_iso, end_time_iso)
|
cached_intervals = self._get_cached_intervals(start_time_iso, end_time_iso)
|
||||||
|
|
||||||
# Detect missing ranges
|
# Check coverage - find ranges not in cache
|
||||||
missing_ranges = self._fetcher.detect_gaps(cached_intervals, start_time_iso, end_time_iso)
|
missing_ranges = self._fetcher.check_coverage(cached_intervals, start_time_iso, end_time_iso)
|
||||||
|
|
||||||
if missing_ranges:
|
if missing_ranges:
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"Detected %d missing range(s) for home %s - will make %d API call(s)",
|
"Coverage check for home %s: %d range(s) missing - will fetch from API",
|
||||||
len(missing_ranges),
|
|
||||||
self._home_id,
|
self._home_id,
|
||||||
len(missing_ranges),
|
len(missing_ranges),
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"All intervals available in cache for home %s - zero API calls needed",
|
"Coverage check for home %s: full coverage in cache - no API calls needed",
|
||||||
self._home_id,
|
self._home_id,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -185,17 +202,240 @@ class TibberPricesIntervalPool:
|
||||||
# This ensures we return exactly what user requested, filtering out extra intervals
|
# This ensures we return exactly what user requested, filtering out extra intervals
|
||||||
final_result = self._get_cached_intervals(start_time_iso, end_time_iso)
|
final_result = self._get_cached_intervals(start_time_iso, end_time_iso)
|
||||||
|
|
||||||
|
# Track if API was called (True if any missing ranges were fetched)
|
||||||
|
api_called = len(missing_ranges) > 0
|
||||||
|
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"Interval pool returning %d intervals for home %s "
|
"Pool returning %d intervals for home %s (from cache: %d, fetched from API: %d ranges, api_called=%s)",
|
||||||
"(initially %d cached, %d API calls made, final %d after re-reading cache)",
|
|
||||||
len(final_result),
|
len(final_result),
|
||||||
self._home_id,
|
self._home_id,
|
||||||
len(cached_intervals),
|
len(cached_intervals),
|
||||||
len(missing_ranges),
|
len(missing_ranges),
|
||||||
len(final_result),
|
api_called,
|
||||||
)
|
)
|
||||||
|
|
||||||
return final_result
|
return final_result, api_called
|
||||||
|
|
||||||
|
async def get_sensor_data(
|
||||||
|
self,
|
||||||
|
api_client: TibberPricesApiClient,
|
||||||
|
user_data: dict[str, Any],
|
||||||
|
home_timezone: str | None = None,
|
||||||
|
*,
|
||||||
|
include_tomorrow: bool = True,
|
||||||
|
) -> tuple[list[dict[str, Any]], bool]:
|
||||||
|
"""
|
||||||
|
Get price intervals for sensor data (day-before-yesterday to end-of-tomorrow).
|
||||||
|
|
||||||
|
Convenience method for coordinator/sensors that need the standard 4-day window:
|
||||||
|
- Day before yesterday (for trailing 24h averages at midnight)
|
||||||
|
- Yesterday (for trailing 24h averages)
|
||||||
|
- Today (current prices)
|
||||||
|
- Tomorrow (if available in cache)
|
||||||
|
|
||||||
|
IMPORTANT - Two distinct behaviors:
|
||||||
|
1. API FETCH: Controlled by include_tomorrow flag
|
||||||
|
- include_tomorrow=False → Only fetch up to end of today (prevents API spam before 13:00)
|
||||||
|
- include_tomorrow=True → Fetch including tomorrow data
|
||||||
|
2. RETURN DATA: Always returns full protected range (including tomorrow if cached)
|
||||||
|
- This ensures cached tomorrow data is used even if include_tomorrow=False
|
||||||
|
|
||||||
|
The separation prevents the following bug:
|
||||||
|
- If include_tomorrow affected both fetch AND return, cached tomorrow data
|
||||||
|
would be lost when include_tomorrow=False, causing infinite refresh loops.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
api_client: TibberPricesApiClient instance for API calls.
|
||||||
|
user_data: User data dict containing home metadata.
|
||||||
|
home_timezone: Optional timezone string (e.g., "Europe/Berlin").
|
||||||
|
include_tomorrow: If True, fetch tomorrow's data from API. If False,
|
||||||
|
only fetch up to end of today. Default True.
|
||||||
|
DOES NOT affect returned data - always returns full range.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Tuple of (intervals, api_called):
|
||||||
|
- intervals: List of price interval dicts for the 4-day window (including any cached
|
||||||
|
tomorrow data), sorted by startsAt.
|
||||||
|
- api_called: True if API was called to fetch missing data, False if all from cache.
|
||||||
|
|
||||||
|
"""
|
||||||
|
# Determine timezone
|
||||||
|
tz_str = home_timezone
|
||||||
|
if not tz_str:
|
||||||
|
tz_str = self._extract_timezone_from_user_data(user_data)
|
||||||
|
|
||||||
|
# Calculate range in home's timezone
|
||||||
|
tz = ZoneInfo(tz_str) if tz_str else None
|
||||||
|
now = self._time_service.now() if self._time_service else dt_utils.now()
|
||||||
|
now_local = now.astimezone(tz) if tz else now
|
||||||
|
|
||||||
|
# Day before yesterday 00:00 (start) - same for both fetch and return
|
||||||
|
day_before_yesterday = (now_local - timedelta(days=2)).replace(hour=0, minute=0, second=0, microsecond=0)
|
||||||
|
|
||||||
|
# End of tomorrow (full protected range) - used for RETURN data
|
||||||
|
end_of_tomorrow = (now_local + timedelta(days=2)).replace(hour=0, minute=0, second=0, microsecond=0)
|
||||||
|
|
||||||
|
# API fetch range depends on include_tomorrow flag
|
||||||
|
if include_tomorrow:
|
||||||
|
fetch_end_time = end_of_tomorrow
|
||||||
|
fetch_desc = "end-of-tomorrow"
|
||||||
|
else:
|
||||||
|
# Only fetch up to end of today (prevents API spam before 13:00)
|
||||||
|
fetch_end_time = (now_local + timedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0)
|
||||||
|
fetch_desc = "end-of-today"
|
||||||
|
|
||||||
|
_LOGGER.debug(
|
||||||
|
"Sensor data request for home %s: fetch %s to %s (%s), return up to %s",
|
||||||
|
self._home_id,
|
||||||
|
day_before_yesterday.isoformat(),
|
||||||
|
fetch_end_time.isoformat(),
|
||||||
|
fetch_desc,
|
||||||
|
end_of_tomorrow.isoformat(),
|
||||||
|
)
|
||||||
|
|
||||||
|
# Fetch data (may be partial if include_tomorrow=False)
|
||||||
|
_intervals, api_called = await self.get_intervals(
|
||||||
|
api_client=api_client,
|
||||||
|
user_data=user_data,
|
||||||
|
start_time=day_before_yesterday,
|
||||||
|
end_time=fetch_end_time,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Return FULL protected range (including any cached tomorrow data)
|
||||||
|
# This ensures cached tomorrow data is available even when include_tomorrow=False
|
||||||
|
final_intervals = self._get_cached_intervals(
|
||||||
|
day_before_yesterday.isoformat(),
|
||||||
|
end_of_tomorrow.isoformat(),
|
||||||
|
)
|
||||||
|
|
||||||
|
return final_intervals, api_called
|
||||||
|
|
||||||
|
def get_pool_stats(self) -> dict[str, Any]:
|
||||||
|
"""
|
||||||
|
Get statistics about the interval pool.
|
||||||
|
|
||||||
|
Returns comprehensive statistics for diagnostic sensors, separated into:
|
||||||
|
- Sensor intervals (protected range: day-before-yesterday to tomorrow)
|
||||||
|
- Cache statistics (entire pool including service-requested data)
|
||||||
|
|
||||||
|
Protected Range:
|
||||||
|
The protected range covers 4 days at 15-min resolution = 384 intervals.
|
||||||
|
These intervals are never evicted by garbage collection.
|
||||||
|
|
||||||
|
Cache Fill Level:
|
||||||
|
Shows how full the cache is relative to MAX_CACHE_SIZE (960).
|
||||||
|
100% is not bad - just means we're using the available space.
|
||||||
|
GC will evict oldest non-protected intervals when limit is reached.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict with sensor intervals, cache stats, and timestamps.
|
||||||
|
|
||||||
|
"""
|
||||||
|
fetch_groups = self._cache.get_fetch_groups()
|
||||||
|
|
||||||
|
# === Sensor Intervals (Protected Range) ===
|
||||||
|
sensor_stats = self._get_sensor_interval_stats()
|
||||||
|
|
||||||
|
# === Cache Statistics (Entire Pool) ===
|
||||||
|
cache_total = self._index.count()
|
||||||
|
cache_limit = MAX_CACHE_SIZE
|
||||||
|
cache_fill_percent = round((cache_total / cache_limit) * 100, 1) if cache_limit > 0 else 0
|
||||||
|
cache_extra = max(0, cache_total - sensor_stats["count"]) # Intervals outside protected range
|
||||||
|
|
||||||
|
# === Timestamps ===
|
||||||
|
# Last sensor fetch (for protected range data)
|
||||||
|
last_sensor_fetch: str | None = None
|
||||||
|
oldest_interval: str | None = None
|
||||||
|
newest_interval: str | None = None
|
||||||
|
|
||||||
|
if fetch_groups:
|
||||||
|
# Find newest fetch group (most recent API call)
|
||||||
|
newest_group = max(fetch_groups, key=lambda g: g["fetched_at"])
|
||||||
|
last_sensor_fetch = newest_group["fetched_at"].isoformat()
|
||||||
|
|
||||||
|
# Find oldest and newest intervals across all fetch groups
|
||||||
|
all_timestamps = list(self._index.get_raw_index().keys())
|
||||||
|
if all_timestamps:
|
||||||
|
oldest_interval = min(all_timestamps)
|
||||||
|
newest_interval = max(all_timestamps)
|
||||||
|
|
||||||
|
return {
|
||||||
|
# Sensor intervals (protected range)
|
||||||
|
"sensor_intervals_count": sensor_stats["count"],
|
||||||
|
"sensor_intervals_expected": sensor_stats["expected"],
|
||||||
|
"sensor_intervals_has_gaps": sensor_stats["has_gaps"],
|
||||||
|
# Cache statistics
|
||||||
|
"cache_intervals_total": cache_total,
|
||||||
|
"cache_intervals_limit": cache_limit,
|
||||||
|
"cache_fill_percent": cache_fill_percent,
|
||||||
|
"cache_intervals_extra": cache_extra,
|
||||||
|
# Timestamps
|
||||||
|
"last_sensor_fetch": last_sensor_fetch,
|
||||||
|
"cache_oldest_interval": oldest_interval,
|
||||||
|
"cache_newest_interval": newest_interval,
|
||||||
|
# Fetch groups (API calls)
|
||||||
|
"fetch_groups_count": len(fetch_groups),
|
||||||
|
}
|
||||||
|
|
||||||
|
def _get_sensor_interval_stats(self) -> dict[str, Any]:
|
||||||
|
"""
|
||||||
|
Get statistics for sensor intervals (protected range).
|
||||||
|
|
||||||
|
Protected range: day-before-yesterday 00:00 to day-after-tomorrow 00:00.
|
||||||
|
Expected: 4 days * 24 hours * 4 intervals = 384 intervals.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict with count, expected, and has_gaps.
|
||||||
|
|
||||||
|
"""
|
||||||
|
start_iso, end_iso = self._cache.get_protected_range()
|
||||||
|
start_dt = datetime.fromisoformat(start_iso)
|
||||||
|
end_dt = datetime.fromisoformat(end_iso)
|
||||||
|
|
||||||
|
# Count expected intervals (15-min resolution)
|
||||||
|
expected_count = int((end_dt - start_dt).total_seconds() / (15 * 60))
|
||||||
|
|
||||||
|
# Count actual intervals in range
|
||||||
|
actual_count = 0
|
||||||
|
current_dt = start_dt
|
||||||
|
|
||||||
|
while current_dt < end_dt:
|
||||||
|
current_key = current_dt.isoformat()[:19]
|
||||||
|
if self._index.contains(current_key):
|
||||||
|
actual_count += 1
|
||||||
|
current_dt += timedelta(minutes=15)
|
||||||
|
|
||||||
|
return {
|
||||||
|
"count": actual_count,
|
||||||
|
"expected": expected_count,
|
||||||
|
"has_gaps": actual_count < expected_count,
|
||||||
|
}
|
||||||
|
|
||||||
|
def _has_gaps_in_protected_range(self) -> bool:
|
||||||
|
"""
|
||||||
|
Check if there are gaps in the protected date range.
|
||||||
|
|
||||||
|
Delegates to _get_sensor_interval_stats() for consistency.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if any gaps exist, False if protected range is complete.
|
||||||
|
|
||||||
|
"""
|
||||||
|
return self._get_sensor_interval_stats()["has_gaps"]
|
||||||
|
|
||||||
|
def _extract_timezone_from_user_data(self, user_data: dict[str, Any]) -> str | None:
|
||||||
|
"""Extract timezone for this home from user_data."""
|
||||||
|
if not user_data:
|
||||||
|
return None
|
||||||
|
|
||||||
|
viewer = user_data.get("viewer", {})
|
||||||
|
homes = viewer.get("homes", [])
|
||||||
|
|
||||||
|
for home in homes:
|
||||||
|
if home.get("id") == self._home_id:
|
||||||
|
return home.get("timeZone")
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
def _get_cached_intervals(
|
def _get_cached_intervals(
|
||||||
self,
|
self,
|
||||||
|
|
@ -207,30 +447,47 @@ class TibberPricesIntervalPool:
|
||||||
|
|
||||||
Uses timestamp_index for O(1) lookups per timestamp.
|
Uses timestamp_index for O(1) lookups per timestamp.
|
||||||
|
|
||||||
|
IMPORTANT: Returns shallow copies of interval dicts to prevent external
|
||||||
|
mutations (e.g., by parse_all_timestamps()) from affecting cached data.
|
||||||
|
The Pool cache must remain immutable to ensure consistent behavior.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
start_time_iso: ISO timestamp string (inclusive).
|
start_time_iso: ISO timestamp string (inclusive).
|
||||||
end_time_iso: ISO timestamp string (exclusive).
|
end_time_iso: ISO timestamp string (exclusive).
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
List of cached interval dicts in time range (may be empty or incomplete).
|
List of cached interval dicts in time range (may be empty or incomplete).
|
||||||
Sorted by startsAt timestamp.
|
Sorted by startsAt timestamp. Each dict is a shallow copy.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
# Parse query range once
|
# Parse query range once
|
||||||
start_time_dt = datetime.fromisoformat(start_time_iso)
|
start_time_dt = datetime.fromisoformat(start_time_iso)
|
||||||
end_time_dt = datetime.fromisoformat(end_time_iso)
|
end_time_dt = datetime.fromisoformat(end_time_iso)
|
||||||
|
|
||||||
|
# CRITICAL: Use NAIVE local timestamps for iteration.
|
||||||
|
#
|
||||||
|
# Index keys are naive local timestamps (timezone stripped via [:19]).
|
||||||
|
# When start and end span a DST transition, they have different UTC offsets
|
||||||
|
# (e.g., start=+01:00 CET, end=+02:00 CEST). Using fixed-offset datetimes
|
||||||
|
# from fromisoformat() causes the loop to compare UTC values for the end
|
||||||
|
# boundary, ending 1 hour early on spring-forward days (or 1 hour late on
|
||||||
|
# fall-back days).
|
||||||
|
#
|
||||||
|
# By iterating in naive local time, we match the index key format exactly
|
||||||
|
# and the end boundary comparison works correctly regardless of DST.
|
||||||
|
current_naive = start_time_dt.replace(tzinfo=None)
|
||||||
|
end_naive = end_time_dt.replace(tzinfo=None)
|
||||||
|
|
||||||
# Use index to find intervals: iterate through expected timestamps
|
# Use index to find intervals: iterate through expected timestamps
|
||||||
result = []
|
result = []
|
||||||
current_dt = start_time_dt
|
|
||||||
|
|
||||||
# Determine interval step (15 min post-2025-10-01, 60 min pre)
|
# Determine interval step (15 min post-2025-10-01, 60 min pre)
|
||||||
resolution_change_dt = datetime(2025, 10, 1, tzinfo=start_time_dt.tzinfo)
|
resolution_change_naive = datetime(2025, 10, 1) # noqa: DTZ001
|
||||||
interval_minutes = INTERVAL_QUARTER_HOURLY if current_dt >= resolution_change_dt else INTERVAL_HOURLY
|
interval_minutes = INTERVAL_QUARTER_HOURLY if current_naive >= resolution_change_naive else INTERVAL_HOURLY
|
||||||
|
|
||||||
while current_dt < end_time_dt:
|
while current_naive < end_naive:
|
||||||
# Check if this timestamp exists in index (O(1) lookup)
|
# Check if this timestamp exists in index (O(1) lookup)
|
||||||
current_dt_key = current_dt.isoformat()[:19]
|
current_dt_key = current_naive.isoformat()[:19]
|
||||||
location = self._index.get(current_dt_key)
|
location = self._index.get(current_dt_key)
|
||||||
|
|
||||||
if location is not None:
|
if location is not None:
|
||||||
|
|
@ -238,19 +495,21 @@ class TibberPricesIntervalPool:
|
||||||
fetch_groups = self._cache.get_fetch_groups()
|
fetch_groups = self._cache.get_fetch_groups()
|
||||||
fetch_group = fetch_groups[location["fetch_group_index"]]
|
fetch_group = fetch_groups[location["fetch_group_index"]]
|
||||||
interval = fetch_group["intervals"][location["interval_index"]]
|
interval = fetch_group["intervals"][location["interval_index"]]
|
||||||
result.append(interval)
|
# CRITICAL: Return shallow copy to prevent external mutations
|
||||||
|
# (e.g., parse_all_timestamps() converts startsAt to datetime in-place)
|
||||||
|
result.append(dict(interval))
|
||||||
|
|
||||||
# Move to next expected interval
|
# Move to next expected interval
|
||||||
current_dt += timedelta(minutes=interval_minutes)
|
current_naive += timedelta(minutes=interval_minutes)
|
||||||
|
|
||||||
# Handle resolution change boundary
|
# Handle resolution change boundary
|
||||||
if interval_minutes == INTERVAL_HOURLY and current_dt >= resolution_change_dt:
|
if interval_minutes == INTERVAL_HOURLY and current_naive >= resolution_change_naive:
|
||||||
interval_minutes = INTERVAL_QUARTER_HOURLY
|
interval_minutes = INTERVAL_QUARTER_HOURLY
|
||||||
|
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"Cache lookup for home %s: found %d intervals in range %s to %s",
|
"Retrieved %d intervals from cache for home %s (range %s to %s)",
|
||||||
self._home_id,
|
|
||||||
len(result),
|
len(result),
|
||||||
|
self._home_id,
|
||||||
start_time_iso,
|
start_time_iso,
|
||||||
end_time_iso,
|
end_time_iso,
|
||||||
)
|
)
|
||||||
|
|
@ -288,7 +547,7 @@ class TibberPricesIntervalPool:
|
||||||
intervals_to_touch = []
|
intervals_to_touch = []
|
||||||
|
|
||||||
for interval in intervals:
|
for interval in intervals:
|
||||||
starts_at_normalized = interval["startsAt"][:19]
|
starts_at_normalized = _normalize_starts_at(interval["startsAt"])
|
||||||
if not self._index.contains(starts_at_normalized):
|
if not self._index.contains(starts_at_normalized):
|
||||||
new_intervals.append(interval)
|
new_intervals.append(interval)
|
||||||
else:
|
else:
|
||||||
|
|
@ -320,7 +579,7 @@ class TibberPricesIntervalPool:
|
||||||
|
|
||||||
# Update timestamp index for all new intervals
|
# Update timestamp index for all new intervals
|
||||||
for interval_index, interval in enumerate(new_intervals):
|
for interval_index, interval in enumerate(new_intervals):
|
||||||
starts_at_normalized = interval["startsAt"][:19]
|
starts_at_normalized = _normalize_starts_at(interval["startsAt"])
|
||||||
self._index.add(interval, fetch_group_index, interval_index)
|
self._index.add(interval, fetch_group_index, interval_index)
|
||||||
|
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
|
|
@ -372,13 +631,13 @@ class TibberPricesIntervalPool:
|
||||||
# Add touch group to cache
|
# Add touch group to cache
|
||||||
touch_group_index = self._cache.add_fetch_group(touch_intervals, fetch_time_dt)
|
touch_group_index = self._cache.add_fetch_group(touch_intervals, fetch_time_dt)
|
||||||
|
|
||||||
# Update index to point to new fetch group
|
# Update index to point to new fetch group using batch operation
|
||||||
for interval_index, (starts_at_normalized, _) in enumerate(intervals_to_touch):
|
# This is more efficient than individual remove+add calls
|
||||||
# Remove old index entry
|
index_updates = [
|
||||||
self._index.remove(starts_at_normalized)
|
(starts_at_normalized, touch_group_index, interval_index)
|
||||||
# Add new index entry pointing to touch group
|
for interval_index, (starts_at_normalized, _) in enumerate(intervals_to_touch)
|
||||||
interval = touch_intervals[interval_index]
|
]
|
||||||
self._index.add(interval, touch_group_index, interval_index)
|
self._index.update_batch(index_updates)
|
||||||
|
|
||||||
_LOGGER.debug(
|
_LOGGER.debug(
|
||||||
"Touched %d cached intervals for home %s (moved to fetch group %d, fetched at %s)",
|
"Touched %d cached intervals for home %s (moved to fetch group %d, fetched at %s)",
|
||||||
|
|
@ -419,6 +678,36 @@ class TibberPricesIntervalPool:
|
||||||
_LOGGER.debug("Auto-save timer cancelled (expected - new changes arrived)")
|
_LOGGER.debug("Auto-save timer cancelled (expected - new changes arrived)")
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
async def async_shutdown(self) -> None:
|
||||||
|
"""
|
||||||
|
Clean shutdown - cancel pending background tasks.
|
||||||
|
|
||||||
|
Should be called when the config entry is unloaded to prevent
|
||||||
|
orphaned tasks and ensure clean resource cleanup.
|
||||||
|
|
||||||
|
"""
|
||||||
|
_LOGGER.debug("Shutting down interval pool for home %s", self._home_id)
|
||||||
|
|
||||||
|
# Cancel debounce task if running
|
||||||
|
if self._save_debounce_task is not None and not self._save_debounce_task.done():
|
||||||
|
self._save_debounce_task.cancel()
|
||||||
|
with contextlib.suppress(asyncio.CancelledError):
|
||||||
|
await self._save_debounce_task
|
||||||
|
_LOGGER.debug("Cancelled pending auto-save task")
|
||||||
|
|
||||||
|
# Cancel any other background tasks
|
||||||
|
if self._background_tasks:
|
||||||
|
for task in list(self._background_tasks):
|
||||||
|
if not task.done():
|
||||||
|
task.cancel()
|
||||||
|
# Wait for all tasks to complete cancellation
|
||||||
|
if self._background_tasks:
|
||||||
|
await asyncio.gather(*self._background_tasks, return_exceptions=True)
|
||||||
|
_LOGGER.debug("Cancelled %d background tasks", len(self._background_tasks))
|
||||||
|
self._background_tasks.clear()
|
||||||
|
|
||||||
|
_LOGGER.debug("Interval pool shutdown complete for home %s", self._home_id)
|
||||||
|
|
||||||
async def _auto_save_pool_state(self) -> None:
|
async def _auto_save_pool_state(self) -> None:
|
||||||
"""Auto-save pool state to storage with lock protection."""
|
"""Auto-save pool state to storage with lock protection."""
|
||||||
if self._hass is None or self._entry_id is None:
|
if self._hass is None or self._entry_id is None:
|
||||||
|
|
@ -451,7 +740,7 @@ class TibberPricesIntervalPool:
|
||||||
living_intervals = []
|
living_intervals = []
|
||||||
|
|
||||||
for interval_idx, interval in enumerate(fetch_group["intervals"]):
|
for interval_idx, interval in enumerate(fetch_group["intervals"]):
|
||||||
starts_at_normalized = interval["startsAt"][:19]
|
starts_at_normalized = _normalize_starts_at(interval["startsAt"])
|
||||||
|
|
||||||
# Check if interval is still referenced in index
|
# Check if interval is still referenced in index
|
||||||
location = self._index.get(starts_at_normalized)
|
location = self._index.get(starts_at_normalized)
|
||||||
|
|
@ -486,6 +775,7 @@ class TibberPricesIntervalPool:
|
||||||
api: TibberPricesApiClient,
|
api: TibberPricesApiClient,
|
||||||
hass: Any | None = None,
|
hass: Any | None = None,
|
||||||
entry_id: str | None = None,
|
entry_id: str | None = None,
|
||||||
|
time_service: TibberPricesTimeService | None = None,
|
||||||
) -> TibberPricesIntervalPool | None:
|
) -> TibberPricesIntervalPool | None:
|
||||||
"""
|
"""
|
||||||
Restore interval pool manager from storage.
|
Restore interval pool manager from storage.
|
||||||
|
|
@ -498,6 +788,7 @@ class TibberPricesIntervalPool:
|
||||||
api: API client for fetching intervals.
|
api: API client for fetching intervals.
|
||||||
hass: HomeAssistant instance for auto-save (optional).
|
hass: HomeAssistant instance for auto-save (optional).
|
||||||
entry_id: Config entry ID for auto-save (optional).
|
entry_id: Config entry ID for auto-save (optional).
|
||||||
|
time_service: TimeService for time-travel support (optional).
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Restored TibberPricesIntervalPool instance, or None if format unknown/corrupted.
|
Restored TibberPricesIntervalPool instance, or None if format unknown/corrupted.
|
||||||
|
|
@ -517,7 +808,7 @@ class TibberPricesIntervalPool:
|
||||||
home_id = data["home_id"]
|
home_id = data["home_id"]
|
||||||
|
|
||||||
# Create manager with home_id from storage
|
# Create manager with home_id from storage
|
||||||
manager = cls(home_id=home_id, api=api, hass=hass, entry_id=entry_id)
|
manager = cls(home_id=home_id, api=api, hass=hass, entry_id=entry_id, time_service=time_service)
|
||||||
|
|
||||||
# Restore fetch groups to cache
|
# Restore fetch groups to cache
|
||||||
for serialized_group in data.get("fetch_groups", []):
|
for serialized_group in data.get("fetch_groups", []):
|
||||||
|
|
|
||||||
|
|
@ -11,5 +11,5 @@
|
||||||
"requirements": [
|
"requirements": [
|
||||||
"aiofiles>=23.2.1"
|
"aiofiles>=23.2.1"
|
||||||
],
|
],
|
||||||
"version": "0.23.1"
|
"version": "0.27.0"
|
||||||
}
|
}
|
||||||
|
|
|
||||||
39
custom_components/tibber_prices/number/__init__.py
Normal file
39
custom_components/tibber_prices/number/__init__.py
Normal file
|
|
@ -0,0 +1,39 @@
|
||||||
|
"""
|
||||||
|
Number platform for Tibber Prices integration.
|
||||||
|
|
||||||
|
Provides configurable number entities for runtime overrides of Best Price
|
||||||
|
and Peak Price period calculation settings. These entities allow automation
|
||||||
|
of configuration parameters without using the options flow.
|
||||||
|
|
||||||
|
When enabled, these entities take precedence over the options flow settings.
|
||||||
|
When disabled (default), the options flow settings are used.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from typing import TYPE_CHECKING
|
||||||
|
|
||||||
|
from .core import TibberPricesConfigNumber
|
||||||
|
from .definitions import NUMBER_ENTITY_DESCRIPTIONS
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from custom_components.tibber_prices.data import TibberPricesConfigEntry
|
||||||
|
from homeassistant.core import HomeAssistant
|
||||||
|
from homeassistant.helpers.entity_platform import AddEntitiesCallback
|
||||||
|
|
||||||
|
|
||||||
|
async def async_setup_entry(
|
||||||
|
_hass: HomeAssistant,
|
||||||
|
entry: TibberPricesConfigEntry,
|
||||||
|
async_add_entities: AddEntitiesCallback,
|
||||||
|
) -> None:
|
||||||
|
"""Set up Tibber Prices number entities based on a config entry."""
|
||||||
|
coordinator = entry.runtime_data.coordinator
|
||||||
|
|
||||||
|
async_add_entities(
|
||||||
|
TibberPricesConfigNumber(
|
||||||
|
coordinator=coordinator,
|
||||||
|
entity_description=entity_description,
|
||||||
|
)
|
||||||
|
for entity_description in NUMBER_ENTITY_DESCRIPTIONS
|
||||||
|
)
|
||||||
242
custom_components/tibber_prices/number/core.py
Normal file
242
custom_components/tibber_prices/number/core.py
Normal file
|
|
@ -0,0 +1,242 @@
|
||||||
|
"""
|
||||||
|
Number entity implementation for Tibber Prices configuration overrides.
|
||||||
|
|
||||||
|
These entities allow runtime configuration of period calculation settings.
|
||||||
|
When a config entity is enabled, its value takes precedence over the
|
||||||
|
options flow setting for period calculations.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from typing import TYPE_CHECKING, Any
|
||||||
|
|
||||||
|
from custom_components.tibber_prices.const import (
|
||||||
|
DOMAIN,
|
||||||
|
get_home_type_translation,
|
||||||
|
get_translation,
|
||||||
|
)
|
||||||
|
from homeassistant.components.number import NumberEntity, RestoreNumber
|
||||||
|
from homeassistant.core import callback
|
||||||
|
from homeassistant.helpers.device_registry import DeviceEntryType, DeviceInfo
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from custom_components.tibber_prices.coordinator import (
|
||||||
|
TibberPricesDataUpdateCoordinator,
|
||||||
|
)
|
||||||
|
|
||||||
|
from .definitions import TibberPricesNumberEntityDescription
|
||||||
|
|
||||||
|
_LOGGER = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class TibberPricesConfigNumber(RestoreNumber, NumberEntity):
|
||||||
|
"""
|
||||||
|
A number entity for configuring period calculation settings at runtime.
|
||||||
|
|
||||||
|
When this entity is enabled, its value overrides the corresponding
|
||||||
|
options flow setting. When disabled (default), the options flow
|
||||||
|
setting is used for period calculations.
|
||||||
|
|
||||||
|
The entity restores its value after Home Assistant restart.
|
||||||
|
"""
|
||||||
|
|
||||||
|
_attr_has_entity_name = True
|
||||||
|
entity_description: TibberPricesNumberEntityDescription
|
||||||
|
|
||||||
|
# Exclude all attributes from recorder history - config entities don't need history
|
||||||
|
_unrecorded_attributes = frozenset(
|
||||||
|
{
|
||||||
|
"description",
|
||||||
|
"long_description",
|
||||||
|
"usage_tips",
|
||||||
|
"friendly_name",
|
||||||
|
"icon",
|
||||||
|
"unit_of_measurement",
|
||||||
|
"mode",
|
||||||
|
"min",
|
||||||
|
"max",
|
||||||
|
"step",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
coordinator: TibberPricesDataUpdateCoordinator,
|
||||||
|
entity_description: TibberPricesNumberEntityDescription,
|
||||||
|
) -> None:
|
||||||
|
"""Initialize the config number entity."""
|
||||||
|
self.coordinator = coordinator
|
||||||
|
self.entity_description = entity_description
|
||||||
|
|
||||||
|
# Set unique ID
|
||||||
|
self._attr_unique_id = (
|
||||||
|
f"{coordinator.config_entry.unique_id or coordinator.config_entry.entry_id}_{entity_description.key}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Initialize with None - will be set in async_added_to_hass
|
||||||
|
self._attr_native_value: float | None = None
|
||||||
|
|
||||||
|
# Setup device info
|
||||||
|
self._setup_device_info()
|
||||||
|
|
||||||
|
def _setup_device_info(self) -> None:
|
||||||
|
"""Set up device information."""
|
||||||
|
home_name, home_id, home_type = self._get_device_info()
|
||||||
|
language = self.coordinator.hass.config.language or "en"
|
||||||
|
translated_model = get_home_type_translation(home_type, language) if home_type else "Unknown"
|
||||||
|
|
||||||
|
self._attr_device_info = DeviceInfo(
|
||||||
|
entry_type=DeviceEntryType.SERVICE,
|
||||||
|
identifiers={
|
||||||
|
(
|
||||||
|
DOMAIN,
|
||||||
|
self.coordinator.config_entry.unique_id or self.coordinator.config_entry.entry_id,
|
||||||
|
)
|
||||||
|
},
|
||||||
|
name=home_name,
|
||||||
|
manufacturer="Tibber",
|
||||||
|
model=translated_model,
|
||||||
|
serial_number=home_id if home_id else None,
|
||||||
|
configuration_url="https://developer.tibber.com/explorer",
|
||||||
|
)
|
||||||
|
|
||||||
|
def _get_device_info(self) -> tuple[str, str | None, str | None]:
|
||||||
|
"""Get device name, ID and type."""
|
||||||
|
user_profile = self.coordinator.get_user_profile()
|
||||||
|
is_subentry = bool(self.coordinator.config_entry.data.get("home_id"))
|
||||||
|
home_id = self.coordinator.config_entry.unique_id
|
||||||
|
home_type = None
|
||||||
|
|
||||||
|
if is_subentry:
|
||||||
|
home_data = self.coordinator.config_entry.data.get("home_data", {})
|
||||||
|
home_id = self.coordinator.config_entry.data.get("home_id")
|
||||||
|
address = home_data.get("address", {})
|
||||||
|
address1 = address.get("address1", "")
|
||||||
|
city = address.get("city", "")
|
||||||
|
app_nickname = home_data.get("appNickname", "")
|
||||||
|
home_type = home_data.get("type", "")
|
||||||
|
|
||||||
|
if app_nickname and app_nickname.strip():
|
||||||
|
home_name = app_nickname.strip()
|
||||||
|
elif address1:
|
||||||
|
home_name = address1
|
||||||
|
if city:
|
||||||
|
home_name = f"{home_name}, {city}"
|
||||||
|
else:
|
||||||
|
home_name = f"Tibber Home {home_id[:8]}" if home_id else "Tibber Home"
|
||||||
|
elif user_profile:
|
||||||
|
home_name = user_profile.get("name") or "Tibber Home"
|
||||||
|
else:
|
||||||
|
home_name = "Tibber Home"
|
||||||
|
|
||||||
|
return home_name, home_id, home_type
|
||||||
|
|
||||||
|
async def async_added_to_hass(self) -> None:
|
||||||
|
"""Handle entity which was added to Home Assistant."""
|
||||||
|
await super().async_added_to_hass()
|
||||||
|
|
||||||
|
# Try to restore previous state
|
||||||
|
last_number_data = await self.async_get_last_number_data()
|
||||||
|
if last_number_data is not None and last_number_data.native_value is not None:
|
||||||
|
self._attr_native_value = last_number_data.native_value
|
||||||
|
_LOGGER.debug(
|
||||||
|
"Restored %s value: %s",
|
||||||
|
self.entity_description.key,
|
||||||
|
self._attr_native_value,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
# Initialize with value from options flow (or default)
|
||||||
|
self._attr_native_value = self._get_value_from_options()
|
||||||
|
_LOGGER.debug(
|
||||||
|
"Initialized %s from options: %s",
|
||||||
|
self.entity_description.key,
|
||||||
|
self._attr_native_value,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Register override with coordinator if entity is enabled
|
||||||
|
# This happens during add, so check entity registry
|
||||||
|
await self._sync_override_state()
|
||||||
|
|
||||||
|
async def async_will_remove_from_hass(self) -> None:
|
||||||
|
"""Handle entity removal from Home Assistant."""
|
||||||
|
# Remove override when entity is removed
|
||||||
|
self.coordinator.remove_config_override(
|
||||||
|
self.entity_description.config_key,
|
||||||
|
self.entity_description.config_section,
|
||||||
|
)
|
||||||
|
await super().async_will_remove_from_hass()
|
||||||
|
|
||||||
|
def _get_value_from_options(self) -> float:
|
||||||
|
"""Get the current value from options flow or default."""
|
||||||
|
options = self.coordinator.config_entry.options
|
||||||
|
section = options.get(self.entity_description.config_section, {})
|
||||||
|
value = section.get(
|
||||||
|
self.entity_description.config_key,
|
||||||
|
self.entity_description.default_value,
|
||||||
|
)
|
||||||
|
return float(value)
|
||||||
|
|
||||||
|
async def _sync_override_state(self) -> None:
|
||||||
|
"""Sync the override state with the coordinator based on entity enabled state."""
|
||||||
|
# Check if entity is enabled in registry
|
||||||
|
if self.registry_entry is not None and not self.registry_entry.disabled:
|
||||||
|
# Entity is enabled - register the override
|
||||||
|
if self._attr_native_value is not None:
|
||||||
|
self.coordinator.set_config_override(
|
||||||
|
self.entity_description.config_key,
|
||||||
|
self.entity_description.config_section,
|
||||||
|
self._attr_native_value,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
# Entity is disabled - remove override
|
||||||
|
self.coordinator.remove_config_override(
|
||||||
|
self.entity_description.config_key,
|
||||||
|
self.entity_description.config_section,
|
||||||
|
)
|
||||||
|
|
||||||
|
async def async_set_native_value(self, value: float) -> None:
|
||||||
|
"""Update the current value and trigger recalculation."""
|
||||||
|
self._attr_native_value = value
|
||||||
|
|
||||||
|
# Update the coordinator's runtime override
|
||||||
|
self.coordinator.set_config_override(
|
||||||
|
self.entity_description.config_key,
|
||||||
|
self.entity_description.config_section,
|
||||||
|
value,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Trigger period recalculation (same path as options update)
|
||||||
|
await self.coordinator.async_handle_config_override_update()
|
||||||
|
|
||||||
|
_LOGGER.debug(
|
||||||
|
"Updated %s to %s, triggered period recalculation",
|
||||||
|
self.entity_description.key,
|
||||||
|
value,
|
||||||
|
)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def extra_state_attributes(self) -> dict[str, Any] | None:
|
||||||
|
"""Return entity state attributes with description."""
|
||||||
|
language = self.coordinator.hass.config.language or "en"
|
||||||
|
|
||||||
|
# Try to get description from custom translations
|
||||||
|
# Custom translations use direct path: number.{key}.description
|
||||||
|
translation_path = [
|
||||||
|
"number",
|
||||||
|
self.entity_description.translation_key or self.entity_description.key,
|
||||||
|
"description",
|
||||||
|
]
|
||||||
|
description = get_translation(translation_path, language)
|
||||||
|
|
||||||
|
attrs: dict[str, Any] = {}
|
||||||
|
if description:
|
||||||
|
attrs["description"] = description
|
||||||
|
|
||||||
|
return attrs if attrs else None
|
||||||
|
|
||||||
|
@callback
|
||||||
|
def async_registry_entry_updated(self) -> None:
|
||||||
|
"""Handle entity registry update (enabled/disabled state change)."""
|
||||||
|
# This is called when the entity is enabled/disabled in the UI
|
||||||
|
self.hass.async_create_task(self._sync_override_state())
|
||||||
250
custom_components/tibber_prices/number/definitions.py
Normal file
250
custom_components/tibber_prices/number/definitions.py
Normal file
|
|
@ -0,0 +1,250 @@
|
||||||
|
"""
|
||||||
|
Number entity definitions for Tibber Prices configuration overrides.
|
||||||
|
|
||||||
|
These number entities allow runtime configuration of Best Price and Peak Price
|
||||||
|
period calculation settings. They are disabled by default - users can enable
|
||||||
|
individual entities to override specific settings at runtime.
|
||||||
|
|
||||||
|
When enabled, the entity value takes precedence over the options flow setting.
|
||||||
|
When disabled (default), the options flow setting is used.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from dataclasses import dataclass
|
||||||
|
|
||||||
|
from homeassistant.components.number import (
|
||||||
|
NumberEntityDescription,
|
||||||
|
NumberMode,
|
||||||
|
)
|
||||||
|
from homeassistant.const import PERCENTAGE, EntityCategory
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(frozen=True, kw_only=True)
|
||||||
|
class TibberPricesNumberEntityDescription(NumberEntityDescription):
|
||||||
|
"""Describes a Tibber Prices number entity for config overrides."""
|
||||||
|
|
||||||
|
# The config key this entity overrides (matches CONF_* constants)
|
||||||
|
config_key: str
|
||||||
|
# The section in options where this setting is stored (e.g., "flexibility_settings")
|
||||||
|
config_section: str
|
||||||
|
# Whether this is for best_price (False) or peak_price (True)
|
||||||
|
is_peak_price: bool = False
|
||||||
|
# Default value from const.py
|
||||||
|
default_value: float | int = 0
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# BEST PRICE PERIOD CONFIGURATION OVERRIDES
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
BEST_PRICE_NUMBER_ENTITIES = (
|
||||||
|
TibberPricesNumberEntityDescription(
|
||||||
|
key="best_price_flex_override",
|
||||||
|
translation_key="best_price_flex_override",
|
||||||
|
name="Best Price: Flexibility",
|
||||||
|
icon="mdi:arrow-down-bold-circle",
|
||||||
|
entity_category=EntityCategory.CONFIG,
|
||||||
|
entity_registry_enabled_default=False,
|
||||||
|
native_min_value=0,
|
||||||
|
native_max_value=50,
|
||||||
|
native_step=1,
|
||||||
|
native_unit_of_measurement=PERCENTAGE,
|
||||||
|
mode=NumberMode.SLIDER,
|
||||||
|
config_key="best_price_flex",
|
||||||
|
config_section="flexibility_settings",
|
||||||
|
is_peak_price=False,
|
||||||
|
default_value=15, # DEFAULT_BEST_PRICE_FLEX
|
||||||
|
),
|
||||||
|
TibberPricesNumberEntityDescription(
|
||||||
|
key="best_price_min_distance_override",
|
||||||
|
translation_key="best_price_min_distance_override",
|
||||||
|
name="Best Price: Minimum Distance",
|
||||||
|
icon="mdi:arrow-down-bold-circle",
|
||||||
|
entity_category=EntityCategory.CONFIG,
|
||||||
|
entity_registry_enabled_default=False,
|
||||||
|
native_min_value=-50,
|
||||||
|
native_max_value=0,
|
||||||
|
native_step=1,
|
||||||
|
native_unit_of_measurement=PERCENTAGE,
|
||||||
|
mode=NumberMode.SLIDER,
|
||||||
|
config_key="best_price_min_distance_from_avg",
|
||||||
|
config_section="flexibility_settings",
|
||||||
|
is_peak_price=False,
|
||||||
|
default_value=-5, # DEFAULT_BEST_PRICE_MIN_DISTANCE_FROM_AVG
|
||||||
|
),
|
||||||
|
TibberPricesNumberEntityDescription(
|
||||||
|
key="best_price_min_period_length_override",
|
||||||
|
translation_key="best_price_min_period_length_override",
|
||||||
|
name="Best Price: Minimum Period Length",
|
||||||
|
icon="mdi:arrow-down-bold-circle",
|
||||||
|
entity_category=EntityCategory.CONFIG,
|
||||||
|
entity_registry_enabled_default=False,
|
||||||
|
native_min_value=15,
|
||||||
|
native_max_value=180,
|
||||||
|
native_step=15,
|
||||||
|
native_unit_of_measurement="min",
|
||||||
|
mode=NumberMode.SLIDER,
|
||||||
|
config_key="best_price_min_period_length",
|
||||||
|
config_section="period_settings",
|
||||||
|
is_peak_price=False,
|
||||||
|
default_value=60, # DEFAULT_BEST_PRICE_MIN_PERIOD_LENGTH
|
||||||
|
),
|
||||||
|
TibberPricesNumberEntityDescription(
|
||||||
|
key="best_price_min_periods_override",
|
||||||
|
translation_key="best_price_min_periods_override",
|
||||||
|
name="Best Price: Minimum Periods",
|
||||||
|
icon="mdi:arrow-down-bold-circle",
|
||||||
|
entity_category=EntityCategory.CONFIG,
|
||||||
|
entity_registry_enabled_default=False,
|
||||||
|
native_min_value=1,
|
||||||
|
native_max_value=10,
|
||||||
|
native_step=1,
|
||||||
|
mode=NumberMode.SLIDER,
|
||||||
|
config_key="min_periods_best",
|
||||||
|
config_section="relaxation_and_target_periods",
|
||||||
|
is_peak_price=False,
|
||||||
|
default_value=2, # DEFAULT_MIN_PERIODS_BEST
|
||||||
|
),
|
||||||
|
TibberPricesNumberEntityDescription(
|
||||||
|
key="best_price_relaxation_attempts_override",
|
||||||
|
translation_key="best_price_relaxation_attempts_override",
|
||||||
|
name="Best Price: Relaxation Attempts",
|
||||||
|
icon="mdi:arrow-down-bold-circle",
|
||||||
|
entity_category=EntityCategory.CONFIG,
|
||||||
|
entity_registry_enabled_default=False,
|
||||||
|
native_min_value=1,
|
||||||
|
native_max_value=12,
|
||||||
|
native_step=1,
|
||||||
|
mode=NumberMode.SLIDER,
|
||||||
|
config_key="relaxation_attempts_best",
|
||||||
|
config_section="relaxation_and_target_periods",
|
||||||
|
is_peak_price=False,
|
||||||
|
default_value=11, # DEFAULT_RELAXATION_ATTEMPTS_BEST
|
||||||
|
),
|
||||||
|
TibberPricesNumberEntityDescription(
|
||||||
|
key="best_price_gap_count_override",
|
||||||
|
translation_key="best_price_gap_count_override",
|
||||||
|
name="Best Price: Gap Tolerance",
|
||||||
|
icon="mdi:arrow-down-bold-circle",
|
||||||
|
entity_category=EntityCategory.CONFIG,
|
||||||
|
entity_registry_enabled_default=False,
|
||||||
|
native_min_value=0,
|
||||||
|
native_max_value=8,
|
||||||
|
native_step=1,
|
||||||
|
mode=NumberMode.SLIDER,
|
||||||
|
config_key="best_price_max_level_gap_count",
|
||||||
|
config_section="period_settings",
|
||||||
|
is_peak_price=False,
|
||||||
|
default_value=1, # DEFAULT_BEST_PRICE_MAX_LEVEL_GAP_COUNT
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# PEAK PRICE PERIOD CONFIGURATION OVERRIDES
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
PEAK_PRICE_NUMBER_ENTITIES = (
|
||||||
|
TibberPricesNumberEntityDescription(
|
||||||
|
key="peak_price_flex_override",
|
||||||
|
translation_key="peak_price_flex_override",
|
||||||
|
name="Peak Price: Flexibility",
|
||||||
|
icon="mdi:arrow-up-bold-circle",
|
||||||
|
entity_category=EntityCategory.CONFIG,
|
||||||
|
entity_registry_enabled_default=False,
|
||||||
|
native_min_value=-50,
|
||||||
|
native_max_value=0,
|
||||||
|
native_step=1,
|
||||||
|
native_unit_of_measurement=PERCENTAGE,
|
||||||
|
mode=NumberMode.SLIDER,
|
||||||
|
config_key="peak_price_flex",
|
||||||
|
config_section="flexibility_settings",
|
||||||
|
is_peak_price=True,
|
||||||
|
default_value=-20, # DEFAULT_PEAK_PRICE_FLEX
|
||||||
|
),
|
||||||
|
TibberPricesNumberEntityDescription(
|
||||||
|
key="peak_price_min_distance_override",
|
||||||
|
translation_key="peak_price_min_distance_override",
|
||||||
|
name="Peak Price: Minimum Distance",
|
||||||
|
icon="mdi:arrow-up-bold-circle",
|
||||||
|
entity_category=EntityCategory.CONFIG,
|
||||||
|
entity_registry_enabled_default=False,
|
||||||
|
native_min_value=0,
|
||||||
|
native_max_value=50,
|
||||||
|
native_step=1,
|
||||||
|
native_unit_of_measurement=PERCENTAGE,
|
||||||
|
mode=NumberMode.SLIDER,
|
||||||
|
config_key="peak_price_min_distance_from_avg",
|
||||||
|
config_section="flexibility_settings",
|
||||||
|
is_peak_price=True,
|
||||||
|
default_value=5, # DEFAULT_PEAK_PRICE_MIN_DISTANCE_FROM_AVG
|
||||||
|
),
|
||||||
|
TibberPricesNumberEntityDescription(
|
||||||
|
key="peak_price_min_period_length_override",
|
||||||
|
translation_key="peak_price_min_period_length_override",
|
||||||
|
name="Peak Price: Minimum Period Length",
|
||||||
|
icon="mdi:arrow-up-bold-circle",
|
||||||
|
entity_category=EntityCategory.CONFIG,
|
||||||
|
entity_registry_enabled_default=False,
|
||||||
|
native_min_value=15,
|
||||||
|
native_max_value=180,
|
||||||
|
native_step=15,
|
||||||
|
native_unit_of_measurement="min",
|
||||||
|
mode=NumberMode.SLIDER,
|
||||||
|
config_key="peak_price_min_period_length",
|
||||||
|
config_section="period_settings",
|
||||||
|
is_peak_price=True,
|
||||||
|
default_value=30, # DEFAULT_PEAK_PRICE_MIN_PERIOD_LENGTH
|
||||||
|
),
|
||||||
|
TibberPricesNumberEntityDescription(
|
||||||
|
key="peak_price_min_periods_override",
|
||||||
|
translation_key="peak_price_min_periods_override",
|
||||||
|
name="Peak Price: Minimum Periods",
|
||||||
|
icon="mdi:arrow-up-bold-circle",
|
||||||
|
entity_category=EntityCategory.CONFIG,
|
||||||
|
entity_registry_enabled_default=False,
|
||||||
|
native_min_value=1,
|
||||||
|
native_max_value=10,
|
||||||
|
native_step=1,
|
||||||
|
mode=NumberMode.SLIDER,
|
||||||
|
config_key="min_periods_peak",
|
||||||
|
config_section="relaxation_and_target_periods",
|
||||||
|
is_peak_price=True,
|
||||||
|
default_value=2, # DEFAULT_MIN_PERIODS_PEAK
|
||||||
|
),
|
||||||
|
TibberPricesNumberEntityDescription(
|
||||||
|
key="peak_price_relaxation_attempts_override",
|
||||||
|
translation_key="peak_price_relaxation_attempts_override",
|
||||||
|
name="Peak Price: Relaxation Attempts",
|
||||||
|
icon="mdi:arrow-up-bold-circle",
|
||||||
|
entity_category=EntityCategory.CONFIG,
|
||||||
|
entity_registry_enabled_default=False,
|
||||||
|
native_min_value=1,
|
||||||
|
native_max_value=12,
|
||||||
|
native_step=1,
|
||||||
|
mode=NumberMode.SLIDER,
|
||||||
|
config_key="relaxation_attempts_peak",
|
||||||
|
config_section="relaxation_and_target_periods",
|
||||||
|
is_peak_price=True,
|
||||||
|
default_value=11, # DEFAULT_RELAXATION_ATTEMPTS_PEAK
|
||||||
|
),
|
||||||
|
TibberPricesNumberEntityDescription(
|
||||||
|
key="peak_price_gap_count_override",
|
||||||
|
translation_key="peak_price_gap_count_override",
|
||||||
|
name="Peak Price: Gap Tolerance",
|
||||||
|
icon="mdi:arrow-up-bold-circle",
|
||||||
|
entity_category=EntityCategory.CONFIG,
|
||||||
|
entity_registry_enabled_default=False,
|
||||||
|
native_min_value=0,
|
||||||
|
native_max_value=8,
|
||||||
|
native_step=1,
|
||||||
|
mode=NumberMode.SLIDER,
|
||||||
|
config_key="peak_price_max_level_gap_count",
|
||||||
|
config_section="period_settings",
|
||||||
|
is_peak_price=True,
|
||||||
|
default_value=1, # DEFAULT_PEAK_PRICE_MAX_LEVEL_GAP_COUNT
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
# All number entity descriptions combined
|
||||||
|
NUMBER_ENTITY_DESCRIPTIONS = BEST_PRICE_NUMBER_ENTITIES + PEAK_PRICE_NUMBER_ENTITIES
|
||||||
|
|
@ -23,6 +23,72 @@ from .helpers import add_alternate_average_attribute
|
||||||
from .metadata import get_current_interval_data
|
from .metadata import get_current_interval_data
|
||||||
|
|
||||||
|
|
||||||
|
def _get_interval_data_for_attributes(
|
||||||
|
key: str,
|
||||||
|
coordinator: TibberPricesDataUpdateCoordinator,
|
||||||
|
attributes: dict,
|
||||||
|
*,
|
||||||
|
time: TibberPricesTimeService,
|
||||||
|
) -> dict | None:
|
||||||
|
"""
|
||||||
|
Get interval data and set timestamp based on sensor type.
|
||||||
|
|
||||||
|
Refactored to reduce branch complexity in main function.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
key: The sensor entity key
|
||||||
|
coordinator: The data update coordinator
|
||||||
|
attributes: Attributes dict to update with timestamp if needed
|
||||||
|
time: TibberPricesTimeService instance
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Interval data if found, None otherwise
|
||||||
|
|
||||||
|
"""
|
||||||
|
now = time.now()
|
||||||
|
|
||||||
|
# Current/next price sensors - override timestamp with interval's startsAt
|
||||||
|
next_sensors = ["next_interval_price", "next_interval_price_level", "next_interval_price_rating"]
|
||||||
|
prev_sensors = ["previous_interval_price", "previous_interval_price_level", "previous_interval_price_rating"]
|
||||||
|
next_hour = ["next_hour_average_price", "next_hour_price_level", "next_hour_price_rating"]
|
||||||
|
curr_interval = ["current_interval_price", "current_interval_price_base"]
|
||||||
|
curr_hour = ["current_hour_average_price", "current_hour_price_level", "current_hour_price_rating"]
|
||||||
|
|
||||||
|
if key in next_sensors:
|
||||||
|
target_time = time.get_next_interval_start()
|
||||||
|
interval_data = find_price_data_for_interval(coordinator.data, target_time, time=time)
|
||||||
|
if interval_data:
|
||||||
|
attributes["timestamp"] = interval_data["startsAt"]
|
||||||
|
return interval_data
|
||||||
|
|
||||||
|
if key in prev_sensors:
|
||||||
|
target_time = time.get_interval_offset_time(-1)
|
||||||
|
interval_data = find_price_data_for_interval(coordinator.data, target_time, time=time)
|
||||||
|
if interval_data:
|
||||||
|
attributes["timestamp"] = interval_data["startsAt"]
|
||||||
|
return interval_data
|
||||||
|
|
||||||
|
if key in next_hour:
|
||||||
|
target_time = now + timedelta(hours=1)
|
||||||
|
interval_data = find_price_data_for_interval(coordinator.data, target_time, time=time)
|
||||||
|
if interval_data:
|
||||||
|
attributes["timestamp"] = interval_data["startsAt"]
|
||||||
|
return interval_data
|
||||||
|
|
||||||
|
# Current interval sensors (both variants)
|
||||||
|
if key in curr_interval:
|
||||||
|
interval_data = get_current_interval_data(coordinator, time=time)
|
||||||
|
if interval_data and "startsAt" in interval_data:
|
||||||
|
attributes["timestamp"] = interval_data["startsAt"]
|
||||||
|
return interval_data
|
||||||
|
|
||||||
|
# Current hour sensors - keep default timestamp
|
||||||
|
if key in curr_hour:
|
||||||
|
return get_current_interval_data(coordinator, time=time)
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
def add_current_interval_price_attributes( # noqa: PLR0913
|
def add_current_interval_price_attributes( # noqa: PLR0913
|
||||||
attributes: dict,
|
attributes: dict,
|
||||||
key: str,
|
key: str,
|
||||||
|
|
@ -46,62 +112,16 @@ def add_current_interval_price_attributes( # noqa: PLR0913
|
||||||
config_entry: Config entry for user preferences
|
config_entry: Config entry for user preferences
|
||||||
|
|
||||||
"""
|
"""
|
||||||
now = time.now()
|
# Get interval data and handle timestamp overrides
|
||||||
|
interval_data = _get_interval_data_for_attributes(key, coordinator, attributes, time=time)
|
||||||
# Determine which interval to use based on sensor type
|
|
||||||
next_interval_sensors = [
|
|
||||||
"next_interval_price",
|
|
||||||
"next_interval_price_level",
|
|
||||||
"next_interval_price_rating",
|
|
||||||
]
|
|
||||||
previous_interval_sensors = [
|
|
||||||
"previous_interval_price",
|
|
||||||
"previous_interval_price_level",
|
|
||||||
"previous_interval_price_rating",
|
|
||||||
]
|
|
||||||
next_hour_sensors = [
|
|
||||||
"next_hour_average_price",
|
|
||||||
"next_hour_price_level",
|
|
||||||
"next_hour_price_rating",
|
|
||||||
]
|
|
||||||
current_hour_sensors = [
|
|
||||||
"current_hour_average_price",
|
|
||||||
"current_hour_price_level",
|
|
||||||
"current_hour_price_rating",
|
|
||||||
]
|
|
||||||
|
|
||||||
# Set interval data based on sensor type
|
|
||||||
# For sensors showing data from OTHER intervals (next/previous), override timestamp with that interval's startsAt
|
|
||||||
# For current interval sensors, keep the default platform timestamp (calculation time)
|
|
||||||
interval_data = None
|
|
||||||
if key in next_interval_sensors:
|
|
||||||
target_time = time.get_next_interval_start()
|
|
||||||
interval_data = find_price_data_for_interval(coordinator.data, target_time, time=time)
|
|
||||||
# Override timestamp with the NEXT interval's startsAt (when that interval starts)
|
|
||||||
if interval_data:
|
|
||||||
attributes["timestamp"] = interval_data["startsAt"]
|
|
||||||
elif key in previous_interval_sensors:
|
|
||||||
target_time = time.get_interval_offset_time(-1)
|
|
||||||
interval_data = find_price_data_for_interval(coordinator.data, target_time, time=time)
|
|
||||||
# Override timestamp with the PREVIOUS interval's startsAt
|
|
||||||
if interval_data:
|
|
||||||
attributes["timestamp"] = interval_data["startsAt"]
|
|
||||||
elif key in next_hour_sensors:
|
|
||||||
target_time = now + timedelta(hours=1)
|
|
||||||
interval_data = find_price_data_for_interval(coordinator.data, target_time, time=time)
|
|
||||||
# Override timestamp with the center of the next rolling hour window
|
|
||||||
if interval_data:
|
|
||||||
attributes["timestamp"] = interval_data["startsAt"]
|
|
||||||
elif key in current_hour_sensors:
|
|
||||||
current_interval_data = get_current_interval_data(coordinator, time=time)
|
|
||||||
# Keep default timestamp (when calculation was made) for current hour sensors
|
|
||||||
else:
|
|
||||||
current_interval_data = get_current_interval_data(coordinator, time=time)
|
|
||||||
interval_data = current_interval_data # Use current_interval_data as interval_data for current_interval_price
|
|
||||||
# Keep default timestamp (current calculation time) for current interval sensors
|
|
||||||
|
|
||||||
# Add icon_color for price sensors (based on their price level)
|
# Add icon_color for price sensors (based on their price level)
|
||||||
if key in ["current_interval_price", "next_interval_price", "previous_interval_price"]:
|
if key in [
|
||||||
|
"current_interval_price",
|
||||||
|
"current_interval_price_base",
|
||||||
|
"next_interval_price",
|
||||||
|
"previous_interval_price",
|
||||||
|
]:
|
||||||
# For interval-based price sensors, get level from interval_data
|
# For interval-based price sensors, get level from interval_data
|
||||||
if interval_data and "level" in interval_data:
|
if interval_data and "level" in interval_data:
|
||||||
level = interval_data["level"]
|
level = interval_data["level"]
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,24 @@
|
||||||
"""Attribute builders for lifecycle diagnostic sensor."""
|
"""
|
||||||
|
Attribute builders for lifecycle diagnostic sensor.
|
||||||
|
|
||||||
|
This sensor uses event-based updates with state-change filtering to minimize
|
||||||
|
recorder entries. Only attributes that are relevant to the lifecycle STATE
|
||||||
|
are included here - attributes that change independently of state belong
|
||||||
|
in a separate sensor or diagnostics.
|
||||||
|
|
||||||
|
Included attributes (update only on state change):
|
||||||
|
- tomorrow_available: Whether tomorrow's price data is available
|
||||||
|
- next_api_poll: When the next API poll will occur (builds user trust)
|
||||||
|
- updates_today: Number of API calls made today
|
||||||
|
- last_turnover: When the last midnight turnover occurred
|
||||||
|
- last_error: Details of the last error (if any)
|
||||||
|
|
||||||
|
Pool statistics (sensor_intervals_count, cache_fill_percent, etc.) are
|
||||||
|
intentionally NOT included here because they change independently of
|
||||||
|
the lifecycle state. With state-change filtering, these would become
|
||||||
|
stale. Pool statistics are available via diagnostics or could be
|
||||||
|
exposed as a separate sensor if needed.
|
||||||
|
"""
|
||||||
|
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
|
|
@ -13,11 +33,6 @@ if TYPE_CHECKING:
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
# Constants for cache age formatting
|
|
||||||
MINUTES_PER_HOUR = 60
|
|
||||||
MINUTES_PER_DAY = 1440 # 24 * 60
|
|
||||||
|
|
||||||
|
|
||||||
def build_lifecycle_attributes(
|
def build_lifecycle_attributes(
|
||||||
coordinator: TibberPricesDataUpdateCoordinator,
|
coordinator: TibberPricesDataUpdateCoordinator,
|
||||||
lifecycle_calculator: TibberPricesLifecycleCalculator,
|
lifecycle_calculator: TibberPricesLifecycleCalculator,
|
||||||
|
|
@ -25,7 +40,11 @@ def build_lifecycle_attributes(
|
||||||
"""
|
"""
|
||||||
Build attributes for data_lifecycle_status sensor.
|
Build attributes for data_lifecycle_status sensor.
|
||||||
|
|
||||||
Shows comprehensive cache status, data availability, and update timing.
|
Event-based updates with state-change filtering - attributes only update
|
||||||
|
when the lifecycle STATE changes (fresh→cached, cached→turnover_pending, etc.).
|
||||||
|
|
||||||
|
Only includes attributes that are directly relevant to the lifecycle state.
|
||||||
|
Pool statistics are intentionally excluded to avoid stale data.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Dict with lifecycle attributes
|
Dict with lifecycle attributes
|
||||||
|
|
@ -33,57 +52,31 @@ def build_lifecycle_attributes(
|
||||||
"""
|
"""
|
||||||
attributes: dict[str, Any] = {}
|
attributes: dict[str, Any] = {}
|
||||||
|
|
||||||
# Cache Status (formatted for readability)
|
# === Tomorrow Data Status ===
|
||||||
cache_age = lifecycle_calculator.get_cache_age_minutes()
|
# Critical for understanding lifecycle state transitions
|
||||||
if cache_age is not None:
|
attributes["tomorrow_available"] = lifecycle_calculator.has_tomorrow_data()
|
||||||
# Format cache age with units for better readability
|
|
||||||
if cache_age < MINUTES_PER_HOUR:
|
|
||||||
attributes["cache_age"] = f"{cache_age} min"
|
|
||||||
elif cache_age < MINUTES_PER_DAY: # Less than 24 hours
|
|
||||||
hours = cache_age // MINUTES_PER_HOUR
|
|
||||||
minutes = cache_age % MINUTES_PER_HOUR
|
|
||||||
attributes["cache_age"] = f"{hours}h {minutes}min" if minutes > 0 else f"{hours}h"
|
|
||||||
else: # 24+ hours
|
|
||||||
days = cache_age // MINUTES_PER_DAY
|
|
||||||
hours = (cache_age % MINUTES_PER_DAY) // MINUTES_PER_HOUR
|
|
||||||
attributes["cache_age"] = f"{days}d {hours}h" if hours > 0 else f"{days}d"
|
|
||||||
|
|
||||||
# Keep raw value for automations
|
# === Next API Poll Time ===
|
||||||
attributes["cache_age_minutes"] = cache_age
|
# Builds user trust: shows when the integration will check for tomorrow data
|
||||||
|
# - Before 13:00: Shows today 13:00 (when tomorrow-search begins)
|
||||||
cache_validity = lifecycle_calculator.get_cache_validity_status()
|
# - After 13:00 without tomorrow data: Shows next Timer #1 execution (active polling)
|
||||||
attributes["cache_validity"] = cache_validity
|
# - After 13:00 with tomorrow data: Shows tomorrow 13:00 (predictive)
|
||||||
|
|
||||||
# Use single "last_update" field instead of duplicating as "last_api_fetch" and "last_cache_update"
|
|
||||||
if coordinator._last_price_update: # noqa: SLF001 - Internal state access for diagnostic display
|
|
||||||
attributes["last_update"] = coordinator._last_price_update.isoformat() # noqa: SLF001
|
|
||||||
|
|
||||||
# Data Availability & Completeness
|
|
||||||
data_completeness = lifecycle_calculator.get_data_completeness_status()
|
|
||||||
attributes["data_completeness"] = data_completeness
|
|
||||||
|
|
||||||
attributes["yesterday_available"] = lifecycle_calculator.is_data_available(-1)
|
|
||||||
attributes["today_available"] = lifecycle_calculator.is_data_available(0)
|
|
||||||
attributes["tomorrow_available"] = lifecycle_calculator.is_data_available(1)
|
|
||||||
attributes["tomorrow_expected_after"] = "13:00"
|
|
||||||
|
|
||||||
# Next Actions (only show if meaningful)
|
|
||||||
next_poll = lifecycle_calculator.get_next_api_poll_time()
|
next_poll = lifecycle_calculator.get_next_api_poll_time()
|
||||||
if next_poll: # None means data is complete, no more polls needed
|
if next_poll:
|
||||||
attributes["next_api_poll"] = next_poll.isoformat()
|
attributes["next_api_poll"] = next_poll.isoformat()
|
||||||
|
|
||||||
next_midnight = lifecycle_calculator.get_next_midnight_turnover_time()
|
# === Update Statistics ===
|
||||||
attributes["next_midnight_turnover"] = next_midnight.isoformat()
|
# Shows API activity - resets at midnight with turnover
|
||||||
|
|
||||||
# Update Statistics
|
|
||||||
api_calls = lifecycle_calculator.get_api_calls_today()
|
api_calls = lifecycle_calculator.get_api_calls_today()
|
||||||
attributes["updates_today"] = api_calls
|
attributes["updates_today"] = api_calls
|
||||||
|
|
||||||
# Last Turnover Time (from midnight handler)
|
# === Midnight Turnover Info ===
|
||||||
if coordinator._midnight_handler.last_turnover_time: # noqa: SLF001 - Internal state access for diagnostic display
|
# When was the last successful data rotation
|
||||||
|
if coordinator._midnight_handler.last_turnover_time: # noqa: SLF001
|
||||||
attributes["last_turnover"] = coordinator._midnight_handler.last_turnover_time.isoformat() # noqa: SLF001
|
attributes["last_turnover"] = coordinator._midnight_handler.last_turnover_time.isoformat() # noqa: SLF001
|
||||||
|
|
||||||
# Last Error (if any)
|
# === Error Status ===
|
||||||
|
# Present only when there's an active error
|
||||||
if coordinator.last_exception:
|
if coordinator.last_exception:
|
||||||
attributes["last_error"] = str(coordinator.last_exception)
|
attributes["last_error"] = str(coordinator.last_exception)
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -13,6 +13,17 @@ if TYPE_CHECKING:
|
||||||
TIMER_30_SEC_BOUNDARY = 30
|
TIMER_30_SEC_BOUNDARY = 30
|
||||||
|
|
||||||
|
|
||||||
|
def _hours_to_minutes(state_value: Any) -> int | None:
|
||||||
|
"""Convert hour-based state back to rounded minutes for attributes."""
|
||||||
|
if state_value is None:
|
||||||
|
return None
|
||||||
|
|
||||||
|
try:
|
||||||
|
return round(float(state_value) * 60)
|
||||||
|
except (TypeError, ValueError):
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
def _is_timing_or_volatility_sensor(key: str) -> bool:
|
def _is_timing_or_volatility_sensor(key: str) -> bool:
|
||||||
"""Check if sensor is a timing or volatility sensor."""
|
"""Check if sensor is a timing or volatility sensor."""
|
||||||
return key.endswith("_volatility") or (
|
return key.endswith("_volatility") or (
|
||||||
|
|
@ -69,5 +80,16 @@ def add_period_timing_attributes(
|
||||||
|
|
||||||
attributes["timestamp"] = timestamp
|
attributes["timestamp"] = timestamp
|
||||||
|
|
||||||
|
# Add minute-precision attributes for hour-based states to keep automation-friendly values
|
||||||
|
minute_value = _hours_to_minutes(state_value)
|
||||||
|
|
||||||
|
if minute_value is not None:
|
||||||
|
if key.endswith("period_duration"):
|
||||||
|
attributes["period_duration_minutes"] = minute_value
|
||||||
|
elif key.endswith("remaining_minutes"):
|
||||||
|
attributes["remaining_minutes"] = minute_value
|
||||||
|
elif key.endswith("next_in_minutes"):
|
||||||
|
attributes["next_in_minutes"] = minute_value
|
||||||
|
|
||||||
# Add icon_color for dynamic styling
|
# Add icon_color for dynamic styling
|
||||||
add_icon_color_attribute(attributes, key=key, state_value=state_value)
|
add_icon_color_attribute(attributes, key=key, state_value=state_value)
|
||||||
|
|
|
||||||
|
|
@ -2,11 +2,7 @@
|
||||||
|
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
from datetime import timedelta
|
from datetime import datetime, timedelta
|
||||||
from typing import TYPE_CHECKING
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
from datetime import datetime
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.coordinator.constants import UPDATE_INTERVAL
|
from custom_components.tibber_prices.coordinator.constants import UPDATE_INTERVAL
|
||||||
|
|
||||||
|
|
@ -17,10 +13,6 @@ FRESH_DATA_THRESHOLD_MINUTES = 5 # Data is "fresh" within 5 minutes of API fetc
|
||||||
TOMORROW_CHECK_HOUR = 13 # After 13:00, we actively check for tomorrow data
|
TOMORROW_CHECK_HOUR = 13 # After 13:00, we actively check for tomorrow data
|
||||||
TURNOVER_WARNING_SECONDS = 900 # Warn 15 minutes before midnight (last quarter-hour: 23:45-00:00)
|
TURNOVER_WARNING_SECONDS = 900 # Warn 15 minutes before midnight (last quarter-hour: 23:45-00:00)
|
||||||
|
|
||||||
# Constants for 15-minute update boundaries (Timer #1)
|
|
||||||
QUARTER_HOUR_BOUNDARIES = [0, 15, 30, 45] # Minutes when Timer #1 can trigger
|
|
||||||
LAST_HOUR_OF_DAY = 23
|
|
||||||
|
|
||||||
|
|
||||||
class TibberPricesLifecycleCalculator(TibberPricesBaseCalculator):
|
class TibberPricesLifecycleCalculator(TibberPricesBaseCalculator):
|
||||||
"""Calculate data lifecycle status and metadata."""
|
"""Calculate data lifecycle status and metadata."""
|
||||||
|
|
@ -82,15 +74,6 @@ class TibberPricesLifecycleCalculator(TibberPricesBaseCalculator):
|
||||||
# Priority 6: Default - using cached data
|
# Priority 6: Default - using cached data
|
||||||
return "cached"
|
return "cached"
|
||||||
|
|
||||||
def get_cache_age_minutes(self) -> int | None:
|
|
||||||
"""Calculate how many minutes old the cached data is."""
|
|
||||||
coordinator = self.coordinator
|
|
||||||
if not coordinator._last_price_update: # noqa: SLF001 - Internal state access for lifecycle tracking
|
|
||||||
return None
|
|
||||||
|
|
||||||
age = coordinator.time.now() - coordinator._last_price_update # noqa: SLF001
|
|
||||||
return int(age.total_seconds() / 60)
|
|
||||||
|
|
||||||
def get_next_api_poll_time(self) -> datetime | None:
|
def get_next_api_poll_time(self) -> datetime | None:
|
||||||
"""
|
"""
|
||||||
Calculate when the next API poll attempt will occur.
|
Calculate when the next API poll attempt will occur.
|
||||||
|
|
@ -179,117 +162,6 @@ class TibberPricesLifecycleCalculator(TibberPricesBaseCalculator):
|
||||||
# Fallback: If we don't know timer offset yet, assume 13:00:00
|
# Fallback: If we don't know timer offset yet, assume 13:00:00
|
||||||
return tomorrow_13
|
return tomorrow_13
|
||||||
|
|
||||||
def get_next_midnight_turnover_time(self) -> datetime:
|
|
||||||
"""Calculate when the next midnight turnover will occur."""
|
|
||||||
coordinator = self.coordinator
|
|
||||||
current_time = coordinator.time.now()
|
|
||||||
now_local = coordinator.time.as_local(current_time)
|
|
||||||
|
|
||||||
# Next midnight
|
|
||||||
return now_local.replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(days=1)
|
|
||||||
|
|
||||||
def is_data_available(self, day_offset: int) -> bool:
|
|
||||||
"""
|
|
||||||
Check if data is available for a specific day.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
day_offset: Day offset (-1=yesterday, 0=today, 1=tomorrow)
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
True if data exists and is not empty
|
|
||||||
|
|
||||||
"""
|
|
||||||
if not self.has_data():
|
|
||||||
return False
|
|
||||||
|
|
||||||
day_data = self.get_intervals(day_offset)
|
|
||||||
return bool(day_data)
|
|
||||||
|
|
||||||
def get_data_completeness_status(self) -> str:
|
|
||||||
"""
|
|
||||||
Get human-readable data completeness status.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
'complete': All data (yesterday/today/tomorrow) available
|
|
||||||
'missing_tomorrow': Only yesterday and today available
|
|
||||||
'missing_yesterday': Only today and tomorrow available
|
|
||||||
'partial': Only today or some other partial combination
|
|
||||||
'no_data': No data available at all
|
|
||||||
|
|
||||||
"""
|
|
||||||
yesterday_available = self.is_data_available(-1)
|
|
||||||
today_available = self.is_data_available(0)
|
|
||||||
tomorrow_available = self.is_data_available(1)
|
|
||||||
|
|
||||||
if yesterday_available and today_available and tomorrow_available:
|
|
||||||
return "complete"
|
|
||||||
if yesterday_available and today_available and not tomorrow_available:
|
|
||||||
return "missing_tomorrow"
|
|
||||||
if not yesterday_available and today_available and tomorrow_available:
|
|
||||||
return "missing_yesterday"
|
|
||||||
if today_available:
|
|
||||||
return "partial"
|
|
||||||
return "no_data"
|
|
||||||
|
|
||||||
def get_cache_validity_status(self) -> str:
|
|
||||||
"""
|
|
||||||
Get cache validity status.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
"valid": Cache is current and matches today's date
|
|
||||||
"stale": Cache exists but is outdated
|
|
||||||
"date_mismatch": Cache is from a different day
|
|
||||||
"empty": No cache data
|
|
||||||
|
|
||||||
"""
|
|
||||||
coordinator = self.coordinator
|
|
||||||
# Check if coordinator has data (transformed, ready for entities)
|
|
||||||
if not self.has_data():
|
|
||||||
return "empty"
|
|
||||||
|
|
||||||
# Check if we have price update timestamp
|
|
||||||
if not coordinator._last_price_update: # noqa: SLF001 - Internal state access for lifecycle tracking
|
|
||||||
return "empty"
|
|
||||||
|
|
||||||
current_time = coordinator.time.now()
|
|
||||||
current_local_date = coordinator.time.as_local(current_time).date()
|
|
||||||
last_update_local_date = coordinator.time.as_local(coordinator._last_price_update).date() # noqa: SLF001
|
|
||||||
|
|
||||||
if current_local_date != last_update_local_date:
|
|
||||||
return "date_mismatch"
|
|
||||||
|
|
||||||
# Check if cache is stale (older than expected)
|
|
||||||
# CRITICAL: After midnight turnover, _last_price_update is set to 00:00
|
|
||||||
# without new API data. The data is still valid (rotated yesterday→today).
|
|
||||||
#
|
|
||||||
# Cache is considered "valid" if EITHER:
|
|
||||||
# 1. Within normal update interval expectations (age ≤ 2 hours), OR
|
|
||||||
# 2. Coordinator update cycle ran recently (within last 30 minutes)
|
|
||||||
#
|
|
||||||
# Why check _last_coordinator_update?
|
|
||||||
# - After midnight turnover, _last_price_update stays at 00:00
|
|
||||||
# - But coordinator polls every 15 minutes and validates cache
|
|
||||||
# - If coordinator ran recently, cache was checked and deemed valid
|
|
||||||
# - This prevents false "stale" status when using rotated data
|
|
||||||
|
|
||||||
age = current_time - coordinator._last_price_update # noqa: SLF001
|
|
||||||
|
|
||||||
# If cache age is within normal expectations (≤2 hours), it's valid
|
|
||||||
if age <= timedelta(hours=2):
|
|
||||||
return "valid"
|
|
||||||
|
|
||||||
# Cache is older than 2 hours - check if coordinator validated it recently
|
|
||||||
# If coordinator ran within last 30 minutes, cache is considered current
|
|
||||||
# (even if _last_price_update is older, e.g., from midnight turnover)
|
|
||||||
if coordinator._last_coordinator_update: # noqa: SLF001 - Internal state access
|
|
||||||
time_since_coordinator_check = current_time - coordinator._last_coordinator_update # noqa: SLF001
|
|
||||||
if time_since_coordinator_check <= timedelta(minutes=30):
|
|
||||||
# Coordinator validated cache recently - it's current
|
|
||||||
return "valid"
|
|
||||||
|
|
||||||
# Cache is old AND coordinator hasn't validated recently - stale
|
|
||||||
return "stale"
|
|
||||||
|
|
||||||
def get_api_calls_today(self) -> int:
|
def get_api_calls_today(self) -> int:
|
||||||
"""Get the number of API calls made today."""
|
"""Get the number of API calls made today."""
|
||||||
coordinator = self.coordinator
|
coordinator = self.coordinator
|
||||||
|
|
@ -300,3 +172,13 @@ class TibberPricesLifecycleCalculator(TibberPricesBaseCalculator):
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
return coordinator._api_calls_today # noqa: SLF001
|
return coordinator._api_calls_today # noqa: SLF001
|
||||||
|
|
||||||
|
def has_tomorrow_data(self) -> bool:
|
||||||
|
"""
|
||||||
|
Check if tomorrow's price data is available.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if tomorrow data exists in the pool.
|
||||||
|
|
||||||
|
"""
|
||||||
|
return not self.coordinator._needs_tomorrow_data() # noqa: SLF001
|
||||||
|
|
|
||||||
|
|
@ -105,6 +105,8 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
# Get configured thresholds from options
|
# Get configured thresholds from options
|
||||||
threshold_rising = self.config.get("price_trend_threshold_rising", 5.0)
|
threshold_rising = self.config.get("price_trend_threshold_rising", 5.0)
|
||||||
threshold_falling = self.config.get("price_trend_threshold_falling", -5.0)
|
threshold_falling = self.config.get("price_trend_threshold_falling", -5.0)
|
||||||
|
threshold_strongly_rising = self.config.get("price_trend_threshold_strongly_rising", 6.0)
|
||||||
|
threshold_strongly_falling = self.config.get("price_trend_threshold_strongly_falling", -6.0)
|
||||||
volatility_threshold_moderate = self.config.get("volatility_threshold_moderate", 15.0)
|
volatility_threshold_moderate = self.config.get("volatility_threshold_moderate", 15.0)
|
||||||
volatility_threshold_high = self.config.get("volatility_threshold_high", 30.0)
|
volatility_threshold_high = self.config.get("volatility_threshold_high", 30.0)
|
||||||
|
|
||||||
|
|
@ -115,11 +117,13 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
lookahead_intervals = self.coordinator.time.minutes_to_intervals(hours * 60)
|
lookahead_intervals = self.coordinator.time.minutes_to_intervals(hours * 60)
|
||||||
|
|
||||||
# Calculate trend with volatility-adaptive thresholds
|
# Calculate trend with volatility-adaptive thresholds
|
||||||
trend_state, diff_pct = calculate_price_trend(
|
trend_state, diff_pct, trend_value = calculate_price_trend(
|
||||||
current_interval_price,
|
current_interval_price,
|
||||||
future_mean,
|
future_mean,
|
||||||
threshold_rising=threshold_rising,
|
threshold_rising=threshold_rising,
|
||||||
threshold_falling=threshold_falling,
|
threshold_falling=threshold_falling,
|
||||||
|
threshold_strongly_rising=threshold_strongly_rising,
|
||||||
|
threshold_strongly_falling=threshold_strongly_falling,
|
||||||
volatility_adjustment=True, # Always enabled
|
volatility_adjustment=True, # Always enabled
|
||||||
lookahead_intervals=lookahead_intervals,
|
lookahead_intervals=lookahead_intervals,
|
||||||
all_intervals=all_intervals,
|
all_intervals=all_intervals,
|
||||||
|
|
@ -127,11 +131,14 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
volatility_threshold_high=volatility_threshold_high,
|
volatility_threshold_high=volatility_threshold_high,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Determine icon color based on trend state
|
# Determine icon color based on trend state (5-level scale)
|
||||||
|
# Strongly rising/falling uses more intense colors
|
||||||
icon_color = {
|
icon_color = {
|
||||||
"rising": "var(--error-color)", # Red/Orange for rising prices (expensive)
|
"strongly_rising": "var(--error-color)", # Red for strongly rising (very expensive)
|
||||||
"falling": "var(--success-color)", # Green for falling prices (cheaper)
|
"rising": "var(--warning-color)", # Orange/Yellow for rising prices
|
||||||
"stable": "var(--state-icon-color)", # Default gray for stable prices
|
"stable": "var(--state-icon-color)", # Default gray for stable prices
|
||||||
|
"falling": "var(--success-color)", # Green for falling prices (cheaper)
|
||||||
|
"strongly_falling": "var(--success-color)", # Green for strongly falling (great deal)
|
||||||
}.get(trend_state, "var(--state-icon-color)")
|
}.get(trend_state, "var(--state-icon-color)")
|
||||||
|
|
||||||
# Convert prices to display currency unit based on configuration
|
# Convert prices to display currency unit based on configuration
|
||||||
|
|
@ -140,6 +147,7 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
# Store attributes in sensor-specific dictionary AND cache the trend value
|
# Store attributes in sensor-specific dictionary AND cache the trend value
|
||||||
self._trend_attributes = {
|
self._trend_attributes = {
|
||||||
"timestamp": next_interval_start,
|
"timestamp": next_interval_start,
|
||||||
|
"trend_value": trend_value,
|
||||||
f"trend_{hours}h_%": round(diff_pct, 1),
|
f"trend_{hours}h_%": round(diff_pct, 1),
|
||||||
f"next_{hours}h_avg": round(future_mean * factor, 2),
|
f"next_{hours}h_avg": round(future_mean * factor, 2),
|
||||||
"interval_count": lookahead_intervals,
|
"interval_count": lookahead_intervals,
|
||||||
|
|
@ -414,6 +422,8 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
return {
|
return {
|
||||||
"rising": self.config.get("price_trend_threshold_rising", 5.0),
|
"rising": self.config.get("price_trend_threshold_rising", 5.0),
|
||||||
"falling": self.config.get("price_trend_threshold_falling", -5.0),
|
"falling": self.config.get("price_trend_threshold_falling", -5.0),
|
||||||
|
"strongly_rising": self.config.get("price_trend_threshold_strongly_rising", 6.0),
|
||||||
|
"strongly_falling": self.config.get("price_trend_threshold_strongly_falling", -6.0),
|
||||||
"moderate": self.config.get("volatility_threshold_moderate", 15.0),
|
"moderate": self.config.get("volatility_threshold_moderate", 15.0),
|
||||||
"high": self.config.get("volatility_threshold_high", 30.0),
|
"high": self.config.get("volatility_threshold_high", 30.0),
|
||||||
}
|
}
|
||||||
|
|
@ -428,7 +438,7 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
current_index: Index of current interval
|
current_index: Index of current interval
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Momentum direction: "rising", "falling", or "stable"
|
Momentum direction: "strongly_rising", "rising", "stable", "falling", or "strongly_falling"
|
||||||
|
|
||||||
"""
|
"""
|
||||||
# Look back 1 hour (4 intervals) for quick reaction
|
# Look back 1 hour (4 intervals) for quick reaction
|
||||||
|
|
@ -451,15 +461,25 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
weighted_sum = sum(price * weight for price, weight in zip(trailing_prices, weights, strict=True))
|
weighted_sum = sum(price * weight for price, weight in zip(trailing_prices, weights, strict=True))
|
||||||
weighted_avg = weighted_sum / sum(weights)
|
weighted_avg = weighted_sum / sum(weights)
|
||||||
|
|
||||||
# Calculate momentum with 3% threshold
|
# Calculate momentum with thresholds
|
||||||
|
# Using same logic as 5-level trend: 3% for normal, 6% (2x) for strong
|
||||||
momentum_threshold = 0.03
|
momentum_threshold = 0.03
|
||||||
diff = (current_price - weighted_avg) / weighted_avg
|
strong_momentum_threshold = 0.06
|
||||||
|
diff = (current_price - weighted_avg) / abs(weighted_avg) if weighted_avg != 0 else 0
|
||||||
|
|
||||||
if diff > momentum_threshold:
|
# Determine momentum level based on thresholds
|
||||||
return "rising"
|
if diff >= strong_momentum_threshold:
|
||||||
if diff < -momentum_threshold:
|
momentum = "strongly_rising"
|
||||||
return "falling"
|
elif diff > momentum_threshold:
|
||||||
return "stable"
|
momentum = "rising"
|
||||||
|
elif diff <= -strong_momentum_threshold:
|
||||||
|
momentum = "strongly_falling"
|
||||||
|
elif diff < -momentum_threshold:
|
||||||
|
momentum = "falling"
|
||||||
|
else:
|
||||||
|
momentum = "stable"
|
||||||
|
|
||||||
|
return momentum
|
||||||
|
|
||||||
def _combine_momentum_with_future(
|
def _combine_momentum_with_future(
|
||||||
self,
|
self,
|
||||||
|
|
@ -472,43 +492,60 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
"""
|
"""
|
||||||
Combine momentum analysis with future outlook to determine final trend.
|
Combine momentum analysis with future outlook to determine final trend.
|
||||||
|
|
||||||
|
Uses 5-level scale: strongly_rising, rising, stable, falling, strongly_falling.
|
||||||
|
Momentum intensity is preserved when future confirms the trend direction.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
current_momentum: Current momentum direction (rising/falling/stable)
|
current_momentum: Current momentum direction (5-level scale)
|
||||||
current_price: Current interval price
|
current_price: Current interval price
|
||||||
future_mean: Average price in future window
|
future_mean: Average price in future window
|
||||||
context: Dict with all_intervals, current_index, lookahead_intervals, thresholds
|
context: Dict with all_intervals, current_index, lookahead_intervals, thresholds
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Final trend direction: "rising", "falling", or "stable"
|
Final trend direction (5-level scale)
|
||||||
|
|
||||||
"""
|
"""
|
||||||
if current_momentum == "rising":
|
# Use calculate_price_trend for consistency with 5-level logic
|
||||||
# We're in uptrend - does it continue?
|
|
||||||
return "rising" if future_mean >= current_price * 0.98 else "falling"
|
|
||||||
|
|
||||||
if current_momentum == "falling":
|
|
||||||
# We're in downtrend - does it continue?
|
|
||||||
return "falling" if future_mean <= current_price * 1.02 else "rising"
|
|
||||||
|
|
||||||
# current_momentum == "stable" - what's coming?
|
|
||||||
all_intervals = context["all_intervals"]
|
all_intervals = context["all_intervals"]
|
||||||
current_index = context["current_index"]
|
current_index = context["current_index"]
|
||||||
lookahead_intervals = context["lookahead_intervals"]
|
lookahead_intervals = context["lookahead_intervals"]
|
||||||
thresholds = context["thresholds"]
|
thresholds = context["thresholds"]
|
||||||
|
|
||||||
lookahead_for_volatility = all_intervals[current_index : current_index + lookahead_intervals]
|
lookahead_for_volatility = all_intervals[current_index : current_index + lookahead_intervals]
|
||||||
trend_state, _ = calculate_price_trend(
|
future_trend, _, _ = calculate_price_trend(
|
||||||
current_price,
|
current_price,
|
||||||
future_mean,
|
future_mean,
|
||||||
threshold_rising=thresholds["rising"],
|
threshold_rising=thresholds["rising"],
|
||||||
threshold_falling=thresholds["falling"],
|
threshold_falling=thresholds["falling"],
|
||||||
|
threshold_strongly_rising=thresholds["strongly_rising"],
|
||||||
|
threshold_strongly_falling=thresholds["strongly_falling"],
|
||||||
volatility_adjustment=True,
|
volatility_adjustment=True,
|
||||||
lookahead_intervals=lookahead_intervals,
|
lookahead_intervals=lookahead_intervals,
|
||||||
all_intervals=lookahead_for_volatility,
|
all_intervals=lookahead_for_volatility,
|
||||||
volatility_threshold_moderate=thresholds["moderate"],
|
volatility_threshold_moderate=thresholds["moderate"],
|
||||||
volatility_threshold_high=thresholds["high"],
|
volatility_threshold_high=thresholds["high"],
|
||||||
)
|
)
|
||||||
return trend_state
|
|
||||||
|
# Check if momentum and future trend are aligned (same direction)
|
||||||
|
momentum_rising = current_momentum in ("rising", "strongly_rising")
|
||||||
|
momentum_falling = current_momentum in ("falling", "strongly_falling")
|
||||||
|
future_rising = future_trend in ("rising", "strongly_rising")
|
||||||
|
future_falling = future_trend in ("falling", "strongly_falling")
|
||||||
|
|
||||||
|
if momentum_rising and future_rising:
|
||||||
|
# Both indicate rising - use the stronger signal
|
||||||
|
if current_momentum == "strongly_rising" or future_trend == "strongly_rising":
|
||||||
|
return "strongly_rising"
|
||||||
|
return "rising"
|
||||||
|
|
||||||
|
if momentum_falling and future_falling:
|
||||||
|
# Both indicate falling - use the stronger signal
|
||||||
|
if current_momentum == "strongly_falling" or future_trend == "strongly_falling":
|
||||||
|
return "strongly_falling"
|
||||||
|
return "falling"
|
||||||
|
|
||||||
|
# Conflicting signals or stable momentum - trust future trend calculation
|
||||||
|
return future_trend
|
||||||
|
|
||||||
def _calculate_standard_trend(
|
def _calculate_standard_trend(
|
||||||
self,
|
self,
|
||||||
|
|
@ -534,11 +571,13 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
current_price = float(current_interval["total"])
|
current_price = float(current_interval["total"])
|
||||||
|
|
||||||
standard_lookahead_volatility = all_intervals[current_index : current_index + standard_lookahead]
|
standard_lookahead_volatility = all_intervals[current_index : current_index + standard_lookahead]
|
||||||
current_trend_3h, _ = calculate_price_trend(
|
current_trend_3h, _, _ = calculate_price_trend(
|
||||||
current_price,
|
current_price,
|
||||||
standard_future_mean,
|
standard_future_mean,
|
||||||
threshold_rising=thresholds["rising"],
|
threshold_rising=thresholds["rising"],
|
||||||
threshold_falling=thresholds["falling"],
|
threshold_falling=thresholds["falling"],
|
||||||
|
threshold_strongly_rising=thresholds["strongly_rising"],
|
||||||
|
threshold_strongly_falling=thresholds["strongly_falling"],
|
||||||
volatility_adjustment=True,
|
volatility_adjustment=True,
|
||||||
lookahead_intervals=standard_lookahead,
|
lookahead_intervals=standard_lookahead,
|
||||||
all_intervals=standard_lookahead_volatility,
|
all_intervals=standard_lookahead_volatility,
|
||||||
|
|
@ -606,11 +645,13 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
|
|
||||||
# Calculate trend at this past point
|
# Calculate trend at this past point
|
||||||
lookahead_for_volatility = all_intervals[i : i + intervals_in_3h]
|
lookahead_for_volatility = all_intervals[i : i + intervals_in_3h]
|
||||||
trend_state, _ = calculate_price_trend(
|
trend_state, _, _ = calculate_price_trend(
|
||||||
price,
|
price,
|
||||||
future_mean,
|
future_mean,
|
||||||
threshold_rising=thresholds["rising"],
|
threshold_rising=thresholds["rising"],
|
||||||
threshold_falling=thresholds["falling"],
|
threshold_falling=thresholds["falling"],
|
||||||
|
threshold_strongly_rising=thresholds["strongly_rising"],
|
||||||
|
threshold_strongly_falling=thresholds["strongly_falling"],
|
||||||
volatility_adjustment=True,
|
volatility_adjustment=True,
|
||||||
lookahead_intervals=intervals_in_3h,
|
lookahead_intervals=intervals_in_3h,
|
||||||
all_intervals=lookahead_for_volatility,
|
all_intervals=lookahead_for_volatility,
|
||||||
|
|
@ -678,11 +719,13 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
|
|
||||||
# Calculate trend at this future point
|
# Calculate trend at this future point
|
||||||
lookahead_for_volatility = all_intervals[i : i + intervals_in_3h]
|
lookahead_for_volatility = all_intervals[i : i + intervals_in_3h]
|
||||||
trend_state, _ = calculate_price_trend(
|
trend_state, _, _ = calculate_price_trend(
|
||||||
current_price,
|
current_price,
|
||||||
future_mean,
|
future_mean,
|
||||||
threshold_rising=thresholds["rising"],
|
threshold_rising=thresholds["rising"],
|
||||||
threshold_falling=thresholds["falling"],
|
threshold_falling=thresholds["falling"],
|
||||||
|
threshold_strongly_rising=thresholds["strongly_rising"],
|
||||||
|
threshold_strongly_falling=thresholds["strongly_falling"],
|
||||||
volatility_adjustment=True,
|
volatility_adjustment=True,
|
||||||
lookahead_intervals=intervals_in_3h,
|
lookahead_intervals=intervals_in_3h,
|
||||||
all_intervals=lookahead_for_volatility,
|
all_intervals=lookahead_for_volatility,
|
||||||
|
|
|
||||||
|
|
@ -4,14 +4,22 @@ from __future__ import annotations
|
||||||
|
|
||||||
from typing import TYPE_CHECKING
|
from typing import TYPE_CHECKING
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import get_display_unit_factor
|
from custom_components.tibber_prices.const import (
|
||||||
|
CONF_VOLATILITY_THRESHOLD_HIGH,
|
||||||
|
CONF_VOLATILITY_THRESHOLD_MODERATE,
|
||||||
|
CONF_VOLATILITY_THRESHOLD_VERY_HIGH,
|
||||||
|
DEFAULT_VOLATILITY_THRESHOLD_HIGH,
|
||||||
|
DEFAULT_VOLATILITY_THRESHOLD_MODERATE,
|
||||||
|
DEFAULT_VOLATILITY_THRESHOLD_VERY_HIGH,
|
||||||
|
get_display_unit_factor,
|
||||||
|
)
|
||||||
from custom_components.tibber_prices.entity_utils import add_icon_color_attribute
|
from custom_components.tibber_prices.entity_utils import add_icon_color_attribute
|
||||||
from custom_components.tibber_prices.sensor.attributes import (
|
from custom_components.tibber_prices.sensor.attributes import (
|
||||||
add_volatility_type_attributes,
|
add_volatility_type_attributes,
|
||||||
get_prices_for_volatility,
|
get_prices_for_volatility,
|
||||||
)
|
)
|
||||||
from custom_components.tibber_prices.utils.average import calculate_mean
|
from custom_components.tibber_prices.utils.average import calculate_mean
|
||||||
from custom_components.tibber_prices.utils.price import calculate_volatility_level
|
from custom_components.tibber_prices.utils.price import calculate_volatility_with_cv
|
||||||
|
|
||||||
from .base import TibberPricesBaseCalculator
|
from .base import TibberPricesBaseCalculator
|
||||||
|
|
||||||
|
|
@ -58,14 +66,22 @@ class TibberPricesVolatilityCalculator(TibberPricesBaseCalculator):
|
||||||
|
|
||||||
# Get volatility thresholds from config
|
# Get volatility thresholds from config
|
||||||
thresholds = {
|
thresholds = {
|
||||||
"threshold_moderate": self.config.get("volatility_threshold_moderate", 5.0),
|
"threshold_moderate": self.config.get(
|
||||||
"threshold_high": self.config.get("volatility_threshold_high", 15.0),
|
CONF_VOLATILITY_THRESHOLD_MODERATE,
|
||||||
"threshold_very_high": self.config.get("volatility_threshold_very_high", 30.0),
|
DEFAULT_VOLATILITY_THRESHOLD_MODERATE,
|
||||||
|
),
|
||||||
|
"threshold_high": self.config.get(CONF_VOLATILITY_THRESHOLD_HIGH, DEFAULT_VOLATILITY_THRESHOLD_HIGH),
|
||||||
|
"threshold_very_high": self.config.get(
|
||||||
|
CONF_VOLATILITY_THRESHOLD_VERY_HIGH,
|
||||||
|
DEFAULT_VOLATILITY_THRESHOLD_VERY_HIGH,
|
||||||
|
),
|
||||||
}
|
}
|
||||||
|
|
||||||
# Get prices based on volatility type
|
# Get prices based on volatility type
|
||||||
prices_to_analyze = get_prices_for_volatility(
|
prices_to_analyze = get_prices_for_volatility(
|
||||||
volatility_type, self.coordinator.data, time=self.coordinator.time
|
volatility_type,
|
||||||
|
self.coordinator.data,
|
||||||
|
time=self.coordinator.time,
|
||||||
)
|
)
|
||||||
|
|
||||||
if not prices_to_analyze:
|
if not prices_to_analyze:
|
||||||
|
|
@ -82,16 +98,17 @@ class TibberPricesVolatilityCalculator(TibberPricesBaseCalculator):
|
||||||
factor = get_display_unit_factor(self.config_entry)
|
factor = get_display_unit_factor(self.config_entry)
|
||||||
spread_display = spread * factor
|
spread_display = spread * factor
|
||||||
|
|
||||||
# Calculate volatility level with custom thresholds (pass price list, not spread)
|
# Calculate volatility level AND coefficient of variation
|
||||||
volatility = calculate_volatility_level(prices_to_analyze, **thresholds)
|
volatility, cv = calculate_volatility_with_cv(prices_to_analyze, **thresholds)
|
||||||
|
|
||||||
# Store attributes for this sensor
|
# Store attributes for this sensor
|
||||||
self._last_volatility_attributes = {
|
self._last_volatility_attributes = {
|
||||||
"price_spread": round(spread_display, 2),
|
"price_spread": round(spread_display, 2),
|
||||||
"price_volatility": volatility,
|
"price_coefficient_variation_%": round(cv, 2) if cv is not None else None,
|
||||||
|
"price_volatility": volatility.lower(),
|
||||||
"price_min": round(price_min * factor, 2),
|
"price_min": round(price_min * factor, 2),
|
||||||
"price_max": round(price_max * factor, 2),
|
"price_max": round(price_max * factor, 2),
|
||||||
"price_mean": round(price_mean * factor, 2), # Mean used for volatility calculation
|
"price_mean": round(price_mean * factor, 2),
|
||||||
"interval_count": len(prices_to_analyze),
|
"interval_count": len(prices_to_analyze),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -177,6 +177,9 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
self._value_getter: Callable | None = self._get_value_getter()
|
self._value_getter: Callable | None = self._get_value_getter()
|
||||||
self._time_sensitive_remove_listener: Callable | None = None
|
self._time_sensitive_remove_listener: Callable | None = None
|
||||||
self._minute_update_remove_listener: Callable | None = None
|
self._minute_update_remove_listener: Callable | None = None
|
||||||
|
# Lifecycle sensor state change detection (for recorder optimization)
|
||||||
|
# Store as Any because native_value can be str/float/datetime depending on sensor type
|
||||||
|
self._last_lifecycle_state: Any = None
|
||||||
# Chart data export (for chart_data_export sensor) - from binary_sensor
|
# Chart data export (for chart_data_export sensor) - from binary_sensor
|
||||||
self._chart_data_last_update = None # Track last service call timestamp
|
self._chart_data_last_update = None # Track last service call timestamp
|
||||||
self._chart_data_error = None # Track last service call error
|
self._chart_data_error = None # Track last service call error
|
||||||
|
|
@ -221,7 +224,7 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
)
|
)
|
||||||
# Modify _state_info to add dynamic exclusion
|
# Modify _state_info to add dynamic exclusion
|
||||||
if self._state_info is None:
|
if self._state_info is None:
|
||||||
self._state_info = {}
|
self._state_info = {"unrecorded_attributes": frozenset()}
|
||||||
current_unrecorded = self._state_info.get("unrecorded_attributes", frozenset())
|
current_unrecorded = self._state_info.get("unrecorded_attributes", frozenset())
|
||||||
# State shows median → exclude price_median from attributes
|
# State shows median → exclude price_median from attributes
|
||||||
# State shows mean → exclude price_mean from attributes
|
# State shows mean → exclude price_mean from attributes
|
||||||
|
|
@ -312,7 +315,18 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
# Clear trend calculation cache for trend sensors
|
# Clear trend calculation cache for trend sensors
|
||||||
elif self.entity_description.key in ("current_price_trend", "next_price_trend_change"):
|
elif self.entity_description.key in ("current_price_trend", "next_price_trend_change"):
|
||||||
self._trend_calculator.clear_calculation_cache()
|
self._trend_calculator.clear_calculation_cache()
|
||||||
self.async_write_ha_state()
|
|
||||||
|
# For lifecycle sensor: Only write state if it actually changed (state-change filter)
|
||||||
|
# This enables precise detection at quarter-hour boundaries (23:45 turnover_pending,
|
||||||
|
# 13:00 searching_tomorrow, 00:00 turnover complete) without recorder spam
|
||||||
|
if self.entity_description.key == "data_lifecycle_status":
|
||||||
|
current_state = self.native_value
|
||||||
|
if current_state != self._last_lifecycle_state:
|
||||||
|
self._last_lifecycle_state = current_state
|
||||||
|
self.async_write_ha_state()
|
||||||
|
# If state didn't change, skip write to recorder
|
||||||
|
else:
|
||||||
|
self.async_write_ha_state()
|
||||||
|
|
||||||
@callback
|
@callback
|
||||||
def _handle_minute_update(self, time_service: TibberPricesTimeService) -> None:
|
def _handle_minute_update(self, time_service: TibberPricesTimeService) -> None:
|
||||||
|
|
@ -347,7 +361,16 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
# Schedule async refresh as a task (we're in a callback)
|
# Schedule async refresh as a task (we're in a callback)
|
||||||
self.hass.async_create_task(self._refresh_chart_metadata())
|
self.hass.async_create_task(self._refresh_chart_metadata())
|
||||||
|
|
||||||
super()._handle_coordinator_update()
|
# For lifecycle sensor: Only write state if it actually changed (event-based filter)
|
||||||
|
# Prevents excessive recorder entries while keeping quarter-hour update capability
|
||||||
|
if self.entity_description.key == "data_lifecycle_status":
|
||||||
|
current_state = self.native_value
|
||||||
|
if current_state != self._last_lifecycle_state:
|
||||||
|
self._last_lifecycle_state = current_state
|
||||||
|
super()._handle_coordinator_update()
|
||||||
|
# If state didn't change, skip write to recorder
|
||||||
|
else:
|
||||||
|
super()._handle_coordinator_update()
|
||||||
|
|
||||||
def _get_value_getter(self) -> Callable | None:
|
def _get_value_getter(self) -> Callable | None:
|
||||||
"""Return the appropriate value getter method based on the sensor type."""
|
"""Return the appropriate value getter method based on the sensor type."""
|
||||||
|
|
@ -964,11 +987,13 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
key = self.entity_description.key
|
key = self.entity_description.key
|
||||||
value = self.native_value
|
value = self.native_value
|
||||||
|
|
||||||
# Icon mapping for trend directions
|
# Icon mapping for trend directions (5-level scale)
|
||||||
trend_icons = {
|
trend_icons = {
|
||||||
|
"strongly_rising": "mdi:chevron-double-up",
|
||||||
"rising": "mdi:trending-up",
|
"rising": "mdi:trending-up",
|
||||||
"falling": "mdi:trending-down",
|
|
||||||
"stable": "mdi:trending-neutral",
|
"stable": "mdi:trending-neutral",
|
||||||
|
"falling": "mdi:trending-down",
|
||||||
|
"strongly_falling": "mdi:chevron-double-down",
|
||||||
}
|
}
|
||||||
|
|
||||||
# Special handling for next_price_trend_change: Icon based on direction attribute
|
# Special handling for next_price_trend_change: Icon based on direction attribute
|
||||||
|
|
|
||||||
|
|
@ -548,7 +548,7 @@ FUTURE_TREND_SENSORS = (
|
||||||
icon="mdi:trending-up", # Dynamic: trending-up/trending-down/trending-neutral based on current trend
|
icon="mdi:trending-up", # Dynamic: trending-up/trending-down/trending-neutral based on current trend
|
||||||
device_class=SensorDeviceClass.ENUM,
|
device_class=SensorDeviceClass.ENUM,
|
||||||
state_class=None, # Enum values: no statistics
|
state_class=None, # Enum values: no statistics
|
||||||
options=["rising", "falling", "stable"],
|
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
||||||
entity_registry_enabled_default=True,
|
entity_registry_enabled_default=True,
|
||||||
),
|
),
|
||||||
# Next trend change sensor (when will trend change?)
|
# Next trend change sensor (when will trend change?)
|
||||||
|
|
@ -570,7 +570,7 @@ FUTURE_TREND_SENSORS = (
|
||||||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||||
device_class=SensorDeviceClass.ENUM,
|
device_class=SensorDeviceClass.ENUM,
|
||||||
state_class=None, # Enum values: no statistics
|
state_class=None, # Enum values: no statistics
|
||||||
options=["rising", "falling", "stable"],
|
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
||||||
entity_registry_enabled_default=True,
|
entity_registry_enabled_default=True,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -580,7 +580,7 @@ FUTURE_TREND_SENSORS = (
|
||||||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||||
device_class=SensorDeviceClass.ENUM,
|
device_class=SensorDeviceClass.ENUM,
|
||||||
state_class=None, # Enum values: no statistics
|
state_class=None, # Enum values: no statistics
|
||||||
options=["rising", "falling", "stable"],
|
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
||||||
entity_registry_enabled_default=True,
|
entity_registry_enabled_default=True,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -590,7 +590,7 @@ FUTURE_TREND_SENSORS = (
|
||||||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||||
device_class=SensorDeviceClass.ENUM,
|
device_class=SensorDeviceClass.ENUM,
|
||||||
state_class=None, # Enum values: no statistics
|
state_class=None, # Enum values: no statistics
|
||||||
options=["rising", "falling", "stable"],
|
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
||||||
entity_registry_enabled_default=True,
|
entity_registry_enabled_default=True,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -600,7 +600,7 @@ FUTURE_TREND_SENSORS = (
|
||||||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||||
device_class=SensorDeviceClass.ENUM,
|
device_class=SensorDeviceClass.ENUM,
|
||||||
state_class=None, # Enum values: no statistics
|
state_class=None, # Enum values: no statistics
|
||||||
options=["rising", "falling", "stable"],
|
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
||||||
entity_registry_enabled_default=True,
|
entity_registry_enabled_default=True,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -610,7 +610,7 @@ FUTURE_TREND_SENSORS = (
|
||||||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||||
device_class=SensorDeviceClass.ENUM,
|
device_class=SensorDeviceClass.ENUM,
|
||||||
state_class=None, # Enum values: no statistics
|
state_class=None, # Enum values: no statistics
|
||||||
options=["rising", "falling", "stable"],
|
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
||||||
entity_registry_enabled_default=True,
|
entity_registry_enabled_default=True,
|
||||||
),
|
),
|
||||||
# Disabled by default: 6h, 8h, 12h
|
# Disabled by default: 6h, 8h, 12h
|
||||||
|
|
@ -621,7 +621,7 @@ FUTURE_TREND_SENSORS = (
|
||||||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||||
device_class=SensorDeviceClass.ENUM,
|
device_class=SensorDeviceClass.ENUM,
|
||||||
state_class=None, # Enum values: no statistics
|
state_class=None, # Enum values: no statistics
|
||||||
options=["rising", "falling", "stable"],
|
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
||||||
entity_registry_enabled_default=False,
|
entity_registry_enabled_default=False,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -631,7 +631,7 @@ FUTURE_TREND_SENSORS = (
|
||||||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||||
device_class=SensorDeviceClass.ENUM,
|
device_class=SensorDeviceClass.ENUM,
|
||||||
state_class=None, # Enum values: no statistics
|
state_class=None, # Enum values: no statistics
|
||||||
options=["rising", "falling", "stable"],
|
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
||||||
entity_registry_enabled_default=False,
|
entity_registry_enabled_default=False,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -641,7 +641,7 @@ FUTURE_TREND_SENSORS = (
|
||||||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||||
device_class=SensorDeviceClass.ENUM,
|
device_class=SensorDeviceClass.ENUM,
|
||||||
state_class=None, # Enum values: no statistics
|
state_class=None, # Enum values: no statistics
|
||||||
options=["rising", "falling", "stable"],
|
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
||||||
entity_registry_enabled_default=False,
|
entity_registry_enabled_default=False,
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
|
@ -731,9 +731,9 @@ BEST_PRICE_TIMING_SENSORS = (
|
||||||
name="Best Price Period Duration",
|
name="Best Price Period Duration",
|
||||||
icon="mdi:timer",
|
icon="mdi:timer",
|
||||||
device_class=SensorDeviceClass.DURATION,
|
device_class=SensorDeviceClass.DURATION,
|
||||||
native_unit_of_measurement=UnitOfTime.MINUTES,
|
native_unit_of_measurement=UnitOfTime.HOURS,
|
||||||
state_class=None, # Changes with each period: no statistics
|
state_class=None, # Duration not needed in long-term statistics
|
||||||
suggested_display_precision=0,
|
suggested_display_precision=2,
|
||||||
entity_registry_enabled_default=False,
|
entity_registry_enabled_default=False,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -741,9 +741,10 @@ BEST_PRICE_TIMING_SENSORS = (
|
||||||
translation_key="best_price_remaining_minutes",
|
translation_key="best_price_remaining_minutes",
|
||||||
name="Best Price Remaining Time",
|
name="Best Price Remaining Time",
|
||||||
icon="mdi:timer-sand",
|
icon="mdi:timer-sand",
|
||||||
native_unit_of_measurement=UnitOfTime.MINUTES,
|
device_class=SensorDeviceClass.DURATION,
|
||||||
state_class=None, # Countdown timer: no statistics
|
native_unit_of_measurement=UnitOfTime.HOURS,
|
||||||
suggested_display_precision=0,
|
state_class=None, # Countdown timers excluded from statistics
|
||||||
|
suggested_display_precision=2,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
key="best_price_progress",
|
key="best_price_progress",
|
||||||
|
|
@ -767,9 +768,10 @@ BEST_PRICE_TIMING_SENSORS = (
|
||||||
translation_key="best_price_next_in_minutes",
|
translation_key="best_price_next_in_minutes",
|
||||||
name="Best Price Starts In",
|
name="Best Price Starts In",
|
||||||
icon="mdi:timer-outline",
|
icon="mdi:timer-outline",
|
||||||
native_unit_of_measurement=UnitOfTime.MINUTES,
|
device_class=SensorDeviceClass.DURATION,
|
||||||
state_class=None, # Countdown timer: no statistics
|
native_unit_of_measurement=UnitOfTime.HOURS,
|
||||||
suggested_display_precision=0,
|
state_class=None, # Next-start timers excluded from statistics
|
||||||
|
suggested_display_precision=2,
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -788,9 +790,9 @@ PEAK_PRICE_TIMING_SENSORS = (
|
||||||
name="Peak Price Period Duration",
|
name="Peak Price Period Duration",
|
||||||
icon="mdi:timer",
|
icon="mdi:timer",
|
||||||
device_class=SensorDeviceClass.DURATION,
|
device_class=SensorDeviceClass.DURATION,
|
||||||
native_unit_of_measurement=UnitOfTime.MINUTES,
|
native_unit_of_measurement=UnitOfTime.HOURS,
|
||||||
state_class=None, # Changes with each period: no statistics
|
state_class=None, # Duration not needed in long-term statistics
|
||||||
suggested_display_precision=0,
|
suggested_display_precision=2,
|
||||||
entity_registry_enabled_default=False,
|
entity_registry_enabled_default=False,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -798,9 +800,10 @@ PEAK_PRICE_TIMING_SENSORS = (
|
||||||
translation_key="peak_price_remaining_minutes",
|
translation_key="peak_price_remaining_minutes",
|
||||||
name="Peak Price Remaining Time",
|
name="Peak Price Remaining Time",
|
||||||
icon="mdi:timer-sand",
|
icon="mdi:timer-sand",
|
||||||
native_unit_of_measurement=UnitOfTime.MINUTES,
|
device_class=SensorDeviceClass.DURATION,
|
||||||
state_class=None, # Countdown timer: no statistics
|
native_unit_of_measurement=UnitOfTime.HOURS,
|
||||||
suggested_display_precision=0,
|
state_class=None, # Countdown timers excluded from statistics
|
||||||
|
suggested_display_precision=2,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
key="peak_price_progress",
|
key="peak_price_progress",
|
||||||
|
|
@ -824,9 +827,10 @@ PEAK_PRICE_TIMING_SENSORS = (
|
||||||
translation_key="peak_price_next_in_minutes",
|
translation_key="peak_price_next_in_minutes",
|
||||||
name="Peak Price Starts In",
|
name="Peak Price Starts In",
|
||||||
icon="mdi:timer-outline",
|
icon="mdi:timer-outline",
|
||||||
native_unit_of_measurement=UnitOfTime.MINUTES,
|
device_class=SensorDeviceClass.DURATION,
|
||||||
state_class=None, # Countdown timer: no statistics
|
native_unit_of_measurement=UnitOfTime.HOURS,
|
||||||
suggested_display_precision=0,
|
state_class=None, # Next-start timers excluded from statistics
|
||||||
|
suggested_display_precision=2,
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -2,7 +2,7 @@
|
||||||
|
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
from typing import TYPE_CHECKING
|
from typing import TYPE_CHECKING, cast
|
||||||
|
|
||||||
from custom_components.tibber_prices.utils.average import (
|
from custom_components.tibber_prices.utils.average import (
|
||||||
calculate_current_leading_max,
|
calculate_current_leading_max,
|
||||||
|
|
@ -70,6 +70,14 @@ def get_value_getter_mapping( # noqa: PLR0913 - needs all calculators as parame
|
||||||
Dictionary mapping entity keys to their value getter callables.
|
Dictionary mapping entity keys to their value getter callables.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
def _minutes_to_hours(value: float | None) -> float | None:
|
||||||
|
"""Convert minutes to hours for duration-oriented sensors."""
|
||||||
|
if value is None:
|
||||||
|
return None
|
||||||
|
|
||||||
|
return value / 60
|
||||||
|
|
||||||
return {
|
return {
|
||||||
# ================================================================
|
# ================================================================
|
||||||
# INTERVAL-BASED SENSORS - via IntervalCalculator
|
# INTERVAL-BASED SENSORS - via IntervalCalculator
|
||||||
|
|
@ -243,11 +251,17 @@ def get_value_getter_mapping( # noqa: PLR0913 - needs all calculators as parame
|
||||||
"best_price_end_time": lambda: timing_calculator.get_period_timing_value(
|
"best_price_end_time": lambda: timing_calculator.get_period_timing_value(
|
||||||
period_type="best_price", value_type="end_time"
|
period_type="best_price", value_type="end_time"
|
||||||
),
|
),
|
||||||
"best_price_period_duration": lambda: timing_calculator.get_period_timing_value(
|
"best_price_period_duration": lambda: _minutes_to_hours(
|
||||||
period_type="best_price", value_type="period_duration"
|
cast(
|
||||||
|
"float | None",
|
||||||
|
timing_calculator.get_period_timing_value(period_type="best_price", value_type="period_duration"),
|
||||||
|
)
|
||||||
),
|
),
|
||||||
"best_price_remaining_minutes": lambda: timing_calculator.get_period_timing_value(
|
"best_price_remaining_minutes": lambda: _minutes_to_hours(
|
||||||
period_type="best_price", value_type="remaining_minutes"
|
cast(
|
||||||
|
"float | None",
|
||||||
|
timing_calculator.get_period_timing_value(period_type="best_price", value_type="remaining_minutes"),
|
||||||
|
)
|
||||||
),
|
),
|
||||||
"best_price_progress": lambda: timing_calculator.get_period_timing_value(
|
"best_price_progress": lambda: timing_calculator.get_period_timing_value(
|
||||||
period_type="best_price", value_type="progress"
|
period_type="best_price", value_type="progress"
|
||||||
|
|
@ -255,18 +269,27 @@ def get_value_getter_mapping( # noqa: PLR0913 - needs all calculators as parame
|
||||||
"best_price_next_start_time": lambda: timing_calculator.get_period_timing_value(
|
"best_price_next_start_time": lambda: timing_calculator.get_period_timing_value(
|
||||||
period_type="best_price", value_type="next_start_time"
|
period_type="best_price", value_type="next_start_time"
|
||||||
),
|
),
|
||||||
"best_price_next_in_minutes": lambda: timing_calculator.get_period_timing_value(
|
"best_price_next_in_minutes": lambda: _minutes_to_hours(
|
||||||
period_type="best_price", value_type="next_in_minutes"
|
cast(
|
||||||
|
"float | None",
|
||||||
|
timing_calculator.get_period_timing_value(period_type="best_price", value_type="next_in_minutes"),
|
||||||
|
)
|
||||||
),
|
),
|
||||||
# Peak Price timing sensors
|
# Peak Price timing sensors
|
||||||
"peak_price_end_time": lambda: timing_calculator.get_period_timing_value(
|
"peak_price_end_time": lambda: timing_calculator.get_period_timing_value(
|
||||||
period_type="peak_price", value_type="end_time"
|
period_type="peak_price", value_type="end_time"
|
||||||
),
|
),
|
||||||
"peak_price_period_duration": lambda: timing_calculator.get_period_timing_value(
|
"peak_price_period_duration": lambda: _minutes_to_hours(
|
||||||
period_type="peak_price", value_type="period_duration"
|
cast(
|
||||||
|
"float | None",
|
||||||
|
timing_calculator.get_period_timing_value(period_type="peak_price", value_type="period_duration"),
|
||||||
|
)
|
||||||
),
|
),
|
||||||
"peak_price_remaining_minutes": lambda: timing_calculator.get_period_timing_value(
|
"peak_price_remaining_minutes": lambda: _minutes_to_hours(
|
||||||
period_type="peak_price", value_type="remaining_minutes"
|
cast(
|
||||||
|
"float | None",
|
||||||
|
timing_calculator.get_period_timing_value(period_type="peak_price", value_type="remaining_minutes"),
|
||||||
|
)
|
||||||
),
|
),
|
||||||
"peak_price_progress": lambda: timing_calculator.get_period_timing_value(
|
"peak_price_progress": lambda: timing_calculator.get_period_timing_value(
|
||||||
period_type="peak_price", value_type="progress"
|
period_type="peak_price", value_type="progress"
|
||||||
|
|
@ -274,8 +297,11 @@ def get_value_getter_mapping( # noqa: PLR0913 - needs all calculators as parame
|
||||||
"peak_price_next_start_time": lambda: timing_calculator.get_period_timing_value(
|
"peak_price_next_start_time": lambda: timing_calculator.get_period_timing_value(
|
||||||
period_type="peak_price", value_type="next_start_time"
|
period_type="peak_price", value_type="next_start_time"
|
||||||
),
|
),
|
||||||
"peak_price_next_in_minutes": lambda: timing_calculator.get_period_timing_value(
|
"peak_price_next_in_minutes": lambda: _minutes_to_hours(
|
||||||
period_type="peak_price", value_type="next_in_minutes"
|
cast(
|
||||||
|
"float | None",
|
||||||
|
timing_calculator.get_period_timing_value(period_type="peak_price", value_type="next_in_minutes"),
|
||||||
|
)
|
||||||
),
|
),
|
||||||
# Chart data export sensor
|
# Chart data export sensor
|
||||||
"chart_data_export": get_chart_data_export_value,
|
"chart_data_export": get_chart_data_export_value,
|
||||||
|
|
|
||||||
|
|
@ -46,12 +46,28 @@ get_apexcharts_yaml:
|
||||||
- rating_level
|
- rating_level
|
||||||
- level
|
- level
|
||||||
translation_key: level_type
|
translation_key: level_type
|
||||||
|
resolution:
|
||||||
|
required: false
|
||||||
|
default: interval
|
||||||
|
example: interval
|
||||||
|
selector:
|
||||||
|
select:
|
||||||
|
options:
|
||||||
|
- interval
|
||||||
|
- hourly
|
||||||
|
translation_key: resolution
|
||||||
highlight_best_price:
|
highlight_best_price:
|
||||||
required: false
|
required: false
|
||||||
default: true
|
default: true
|
||||||
example: true
|
example: true
|
||||||
selector:
|
selector:
|
||||||
boolean:
|
boolean:
|
||||||
|
highlight_peak_price:
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
example: false
|
||||||
|
selector:
|
||||||
|
boolean:
|
||||||
get_chartdata:
|
get_chartdata:
|
||||||
fields:
|
fields:
|
||||||
general:
|
general:
|
||||||
|
|
@ -245,3 +261,12 @@ refresh_user_data:
|
||||||
selector:
|
selector:
|
||||||
config_entry:
|
config_entry:
|
||||||
integration: tibber_prices
|
integration: tibber_prices
|
||||||
|
|
||||||
|
debug_clear_tomorrow:
|
||||||
|
fields:
|
||||||
|
entry_id:
|
||||||
|
required: false
|
||||||
|
example: "1234567890abcdef"
|
||||||
|
selector:
|
||||||
|
config_entry:
|
||||||
|
integration: tibber_prices
|
||||||
|
|
|
||||||
|
|
@ -5,6 +5,7 @@ This package provides service endpoints for external integrations and data expor
|
||||||
- Chart data export (get_chartdata)
|
- Chart data export (get_chartdata)
|
||||||
- ApexCharts YAML generation (get_apexcharts_yaml)
|
- ApexCharts YAML generation (get_apexcharts_yaml)
|
||||||
- User data refresh (refresh_user_data)
|
- User data refresh (refresh_user_data)
|
||||||
|
- Debug: Clear tomorrow data (debug_clear_tomorrow) - DevContainer only
|
||||||
|
|
||||||
Architecture:
|
Architecture:
|
||||||
- helpers.py: Common utilities (get_entry_and_data)
|
- helpers.py: Common utilities (get_entry_and_data)
|
||||||
|
|
@ -12,11 +13,13 @@ Architecture:
|
||||||
- chartdata.py: Main data export service handler
|
- chartdata.py: Main data export service handler
|
||||||
- apexcharts.py: ApexCharts card YAML generator
|
- apexcharts.py: ApexCharts card YAML generator
|
||||||
- refresh_user_data.py: User data refresh handler
|
- refresh_user_data.py: User data refresh handler
|
||||||
|
- debug_clear_tomorrow.py: Debug tool for testing tomorrow refresh (dev only)
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import os
|
||||||
from typing import TYPE_CHECKING
|
from typing import TYPE_CHECKING
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import DOMAIN
|
from custom_components.tibber_prices.const import DOMAIN
|
||||||
|
|
@ -42,6 +45,9 @@ __all__ = [
|
||||||
"async_setup_services",
|
"async_setup_services",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
# Check if running in development mode (DevContainer)
|
||||||
|
_IS_DEV_MODE = os.environ.get("TIBBER_PRICES_DEV") == "1"
|
||||||
|
|
||||||
|
|
||||||
@callback
|
@callback
|
||||||
def async_setup_services(hass: HomeAssistant) -> None:
|
def async_setup_services(hass: HomeAssistant) -> None:
|
||||||
|
|
@ -74,3 +80,19 @@ def async_setup_services(hass: HomeAssistant) -> None:
|
||||||
schema=REFRESH_USER_DATA_SERVICE_SCHEMA,
|
schema=REFRESH_USER_DATA_SERVICE_SCHEMA,
|
||||||
supports_response=SupportsResponse.ONLY,
|
supports_response=SupportsResponse.ONLY,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Debug services - only available in DevContainer (TIBBER_PRICES_DEV=1)
|
||||||
|
if _IS_DEV_MODE:
|
||||||
|
from .debug_clear_tomorrow import ( # noqa: PLC0415 - Conditional import for dev-only service
|
||||||
|
DEBUG_CLEAR_TOMORROW_SERVICE_NAME,
|
||||||
|
DEBUG_CLEAR_TOMORROW_SERVICE_SCHEMA,
|
||||||
|
handle_debug_clear_tomorrow,
|
||||||
|
)
|
||||||
|
|
||||||
|
hass.services.async_register(
|
||||||
|
DOMAIN,
|
||||||
|
DEBUG_CLEAR_TOMORROW_SERVICE_NAME,
|
||||||
|
handle_debug_clear_tomorrow,
|
||||||
|
schema=DEBUG_CLEAR_TOMORROW_SERVICE_SCHEMA,
|
||||||
|
supports_response=SupportsResponse.ONLY,
|
||||||
|
)
|
||||||
|
|
|
||||||
238
custom_components/tibber_prices/services/debug_clear_tomorrow.py
Normal file
238
custom_components/tibber_prices/services/debug_clear_tomorrow.py
Normal file
|
|
@ -0,0 +1,238 @@
|
||||||
|
"""
|
||||||
|
Debug service to clear tomorrow's data from the interval pool.
|
||||||
|
|
||||||
|
This service is intended for testing the tomorrow data refresh cycle without
|
||||||
|
having to wait for the next day or restart Home Assistant.
|
||||||
|
|
||||||
|
WARNING: This is a debug/development tool. Use with caution in production.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
service: tibber_prices.debug_clear_tomorrow
|
||||||
|
data: {}
|
||||||
|
|
||||||
|
After calling this service:
|
||||||
|
1. The tomorrow data will be removed from the interval pool
|
||||||
|
2. The lifecycle sensor will show "searching_tomorrow" (after 13:00)
|
||||||
|
3. The next Timer #1 cycle will fetch tomorrow data from the API
|
||||||
|
4. You can observe the full refresh cycle in real-time
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from datetime import datetime, timedelta
|
||||||
|
from typing import TYPE_CHECKING, Any
|
||||||
|
|
||||||
|
import voluptuous as vol
|
||||||
|
|
||||||
|
from custom_components.tibber_prices.const import DOMAIN
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from custom_components.tibber_prices.coordinator import TibberPricesDataUpdateCoordinator
|
||||||
|
from homeassistant.core import ServiceCall, ServiceResponse
|
||||||
|
|
||||||
|
_LOGGER = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
DEBUG_CLEAR_TOMORROW_SERVICE_NAME = "debug_clear_tomorrow"
|
||||||
|
DEBUG_CLEAR_TOMORROW_SERVICE_SCHEMA = vol.Schema(
|
||||||
|
{
|
||||||
|
vol.Optional("entry_id"): str,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
async def handle_debug_clear_tomorrow(call: ServiceCall) -> ServiceResponse:
|
||||||
|
"""
|
||||||
|
Handle the debug_clear_tomorrow service call.
|
||||||
|
|
||||||
|
Removes tomorrow's intervals from the interval pool to allow testing
|
||||||
|
of the tomorrow data refresh cycle.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict with operation results (intervals removed, pool stats before/after).
|
||||||
|
|
||||||
|
"""
|
||||||
|
hass = call.hass
|
||||||
|
|
||||||
|
# Get entry_id from call data or use first available
|
||||||
|
entry_id = call.data.get("entry_id")
|
||||||
|
|
||||||
|
if entry_id:
|
||||||
|
entry = next(
|
||||||
|
(e for e in hass.config_entries.async_entries(DOMAIN) if e.entry_id == entry_id),
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
# Use first available entry
|
||||||
|
entries = hass.config_entries.async_entries(DOMAIN)
|
||||||
|
entry = entries[0] if entries else None
|
||||||
|
|
||||||
|
if not entry or not hasattr(entry, "runtime_data") or not entry.runtime_data:
|
||||||
|
return {"success": False, "error": "No valid config entry found"}
|
||||||
|
|
||||||
|
coordinator: TibberPricesDataUpdateCoordinator = entry.runtime_data.coordinator
|
||||||
|
|
||||||
|
# Get pool manager from coordinator
|
||||||
|
pool = coordinator._price_data_manager._interval_pool # noqa: SLF001
|
||||||
|
|
||||||
|
# Get stats before
|
||||||
|
stats_before = pool.get_pool_stats()
|
||||||
|
|
||||||
|
# Calculate tomorrow's date range
|
||||||
|
now = coordinator.time.now()
|
||||||
|
now_local = coordinator.time.as_local(now)
|
||||||
|
tomorrow_start = (now_local + timedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0)
|
||||||
|
tomorrow_end = (now_local + timedelta(days=2)).replace(hour=0, minute=0, second=0, microsecond=0)
|
||||||
|
|
||||||
|
_LOGGER.info(
|
||||||
|
"DEBUG: Clearing tomorrow's data from pool (range: %s to %s)",
|
||||||
|
tomorrow_start.isoformat(),
|
||||||
|
tomorrow_end.isoformat(),
|
||||||
|
)
|
||||||
|
|
||||||
|
# Remove tomorrow's intervals from the pool index
|
||||||
|
removed_count = await _clear_intervals_in_range(pool, tomorrow_start.isoformat(), tomorrow_end.isoformat())
|
||||||
|
|
||||||
|
# Also remove tomorrow's intervals from coordinator.data["priceInfo"]
|
||||||
|
# This ensures sensors show "unknown" for tomorrow data
|
||||||
|
removed_from_coordinator = _clear_intervals_from_coordinator(coordinator, tomorrow_start, tomorrow_end)
|
||||||
|
|
||||||
|
# Get stats after
|
||||||
|
stats_after = pool.get_pool_stats()
|
||||||
|
|
||||||
|
# Force coordinator to re-check tomorrow data status and update ALL sensors
|
||||||
|
# This updates the lifecycle sensor and makes tomorrow sensors show "unknown"
|
||||||
|
coordinator.async_update_listeners()
|
||||||
|
|
||||||
|
result: dict[str, Any] = {
|
||||||
|
"success": True,
|
||||||
|
"intervals_removed_from_pool": removed_count,
|
||||||
|
"intervals_removed_from_coordinator": removed_from_coordinator,
|
||||||
|
"tomorrow_range": {
|
||||||
|
"start": tomorrow_start.isoformat(),
|
||||||
|
"end": tomorrow_end.isoformat(),
|
||||||
|
},
|
||||||
|
"pool_stats_before": {
|
||||||
|
"cache_intervals_total": stats_before.get("cache_intervals_total"),
|
||||||
|
"cache_newest_interval": stats_before.get("cache_newest_interval"),
|
||||||
|
},
|
||||||
|
"pool_stats_after": {
|
||||||
|
"cache_intervals_total": stats_after.get("cache_intervals_total"),
|
||||||
|
"cache_newest_interval": stats_after.get("cache_newest_interval"),
|
||||||
|
},
|
||||||
|
"message": f"Removed {removed_count} tomorrow intervals. Next Timer #1 cycle will fetch new data.",
|
||||||
|
}
|
||||||
|
|
||||||
|
_LOGGER.info("DEBUG: Clear tomorrow complete - %s", result)
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
def _clear_intervals_from_coordinator(
|
||||||
|
coordinator: TibberPricesDataUpdateCoordinator,
|
||||||
|
start_dt: datetime,
|
||||||
|
end_dt: datetime,
|
||||||
|
) -> int:
|
||||||
|
"""
|
||||||
|
Remove intervals from coordinator.data["priceInfo"] in the given time range.
|
||||||
|
|
||||||
|
This ensures sensors show "unknown" for the removed intervals.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
coordinator: TibberPricesDataUpdateCoordinator instance.
|
||||||
|
start_dt: Start datetime (inclusive).
|
||||||
|
end_dt: End datetime (exclusive).
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Number of intervals removed.
|
||||||
|
|
||||||
|
"""
|
||||||
|
if not coordinator.data or "priceInfo" not in coordinator.data:
|
||||||
|
return 0
|
||||||
|
|
||||||
|
price_info = coordinator.data["priceInfo"]
|
||||||
|
original_count = len(price_info)
|
||||||
|
|
||||||
|
# Filter out intervals in the range
|
||||||
|
# Intervals have startsAt as datetime objects (after parse_all_timestamps)
|
||||||
|
filtered = []
|
||||||
|
for interval in price_info:
|
||||||
|
starts_at = interval.get("startsAt")
|
||||||
|
if starts_at is None:
|
||||||
|
filtered.append(interval)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Handle both datetime and string formats
|
||||||
|
starts_at_dt = datetime.fromisoformat(starts_at) if isinstance(starts_at, str) else starts_at
|
||||||
|
|
||||||
|
# Keep intervals outside the removal range
|
||||||
|
if starts_at_dt < start_dt or starts_at_dt >= end_dt:
|
||||||
|
filtered.append(interval)
|
||||||
|
|
||||||
|
# Update coordinator.data in place
|
||||||
|
coordinator.data["priceInfo"] = filtered
|
||||||
|
|
||||||
|
removed_count = original_count - len(filtered)
|
||||||
|
_LOGGER.debug(
|
||||||
|
"DEBUG: Removed %d intervals from coordinator.data (had %d, now %d)",
|
||||||
|
removed_count,
|
||||||
|
original_count,
|
||||||
|
len(filtered),
|
||||||
|
)
|
||||||
|
|
||||||
|
return removed_count
|
||||||
|
|
||||||
|
|
||||||
|
async def _clear_intervals_in_range(
|
||||||
|
pool: Any,
|
||||||
|
start_iso: str,
|
||||||
|
end_iso: str,
|
||||||
|
) -> int:
|
||||||
|
"""
|
||||||
|
Remove intervals in the given time range from the pool.
|
||||||
|
|
||||||
|
This manipulates the pool's internal cache to remove specific intervals.
|
||||||
|
Used only for debug/testing purposes.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
pool: IntervalPoolManager instance.
|
||||||
|
start_iso: ISO timestamp string (inclusive).
|
||||||
|
end_iso: ISO timestamp string (exclusive).
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Number of intervals removed.
|
||||||
|
|
||||||
|
"""
|
||||||
|
# Access internal index
|
||||||
|
index = pool._index # noqa: SLF001
|
||||||
|
|
||||||
|
# Parse range
|
||||||
|
start_dt = datetime.fromisoformat(start_iso)
|
||||||
|
end_dt = datetime.fromisoformat(end_iso)
|
||||||
|
|
||||||
|
# Find all timestamps in range
|
||||||
|
removed_count = 0
|
||||||
|
current_dt = start_dt
|
||||||
|
|
||||||
|
while current_dt < end_dt:
|
||||||
|
current_key = current_dt.isoformat()[:19]
|
||||||
|
|
||||||
|
# Check if this timestamp exists in index
|
||||||
|
location = index.get(current_key)
|
||||||
|
if location is not None:
|
||||||
|
# Remove from index
|
||||||
|
index.remove(current_key)
|
||||||
|
removed_count += 1
|
||||||
|
|
||||||
|
# Move to next 15-min interval
|
||||||
|
current_dt += timedelta(minutes=15)
|
||||||
|
|
||||||
|
# Note: We only remove from the index, not from the fetch_groups.
|
||||||
|
# The intervals will remain in fetch_groups but won't be found via index lookup.
|
||||||
|
# This is simpler and safe - GC will clean up orphaned intervals eventually.
|
||||||
|
|
||||||
|
# Persist the updated pool state via manager's save method
|
||||||
|
await pool._auto_save_pool_state() # noqa: SLF001
|
||||||
|
|
||||||
|
return removed_count
|
||||||
|
|
@ -24,6 +24,8 @@ from datetime import datetime, time
|
||||||
from typing import Any
|
from typing import Any
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import (
|
from custom_components.tibber_prices.const import (
|
||||||
|
CONF_AVERAGE_SENSOR_DISPLAY,
|
||||||
|
DEFAULT_AVERAGE_SENSOR_DISPLAY,
|
||||||
DEFAULT_PRICE_RATING_THRESHOLD_HIGH,
|
DEFAULT_PRICE_RATING_THRESHOLD_HIGH,
|
||||||
DEFAULT_PRICE_RATING_THRESHOLD_LOW,
|
DEFAULT_PRICE_RATING_THRESHOLD_LOW,
|
||||||
get_translation,
|
get_translation,
|
||||||
|
|
@ -32,6 +34,7 @@ from custom_components.tibber_prices.coordinator.helpers import (
|
||||||
get_intervals_for_day_offsets,
|
get_intervals_for_day_offsets,
|
||||||
)
|
)
|
||||||
from custom_components.tibber_prices.sensor.helpers import aggregate_level_data, aggregate_rating_data
|
from custom_components.tibber_prices.sensor.helpers import aggregate_level_data, aggregate_rating_data
|
||||||
|
from custom_components.tibber_prices.utils.average import calculate_mean, calculate_median
|
||||||
|
|
||||||
|
|
||||||
def normalize_level_filter(value: list[str] | None) -> list[str] | None:
|
def normalize_level_filter(value: list[str] | None) -> list[str] | None:
|
||||||
|
|
@ -48,6 +51,99 @@ def normalize_rating_level_filter(value: list[str] | None) -> list[str] | None:
|
||||||
return [v.upper() for v in value]
|
return [v.upper() for v in value]
|
||||||
|
|
||||||
|
|
||||||
|
def aggregate_to_hourly( # noqa: PLR0912
|
||||||
|
intervals: list[dict],
|
||||||
|
coordinator: Any,
|
||||||
|
threshold_low: float = DEFAULT_PRICE_RATING_THRESHOLD_LOW,
|
||||||
|
threshold_high: float = DEFAULT_PRICE_RATING_THRESHOLD_HIGH,
|
||||||
|
) -> list[dict]:
|
||||||
|
"""
|
||||||
|
Aggregate 15-minute intervals to hourly using rolling 5-interval window.
|
||||||
|
|
||||||
|
Preserves original field names (startsAt, total, level, rating_level) so the
|
||||||
|
aggregated data can be processed by the same code path as interval data.
|
||||||
|
|
||||||
|
Uses the same methodology as sensor rolling hour calculations:
|
||||||
|
- 5-interval window: 2 before + center + 2 after (60 minutes total)
|
||||||
|
- Center interval is at :00 of each hour
|
||||||
|
- Respects user's CONF_AVERAGE_SENSOR_DISPLAY setting (mean vs median)
|
||||||
|
|
||||||
|
Example for 10:00 data point:
|
||||||
|
- Window includes: 09:30, 09:45, 10:00, 10:15, 10:30
|
||||||
|
|
||||||
|
Args:
|
||||||
|
intervals: List of 15-minute price intervals with startsAt, total, level, rating_level
|
||||||
|
coordinator: Data update coordinator instance
|
||||||
|
threshold_low: Rating level threshold (low/normal boundary)
|
||||||
|
threshold_high: Rating level threshold (normal/high boundary)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of hourly data points with same structure as input (startsAt, total, level, rating_level)
|
||||||
|
|
||||||
|
"""
|
||||||
|
if not intervals:
|
||||||
|
return []
|
||||||
|
|
||||||
|
# Get user's average display preference (mean or median)
|
||||||
|
average_display = coordinator.config_entry.options.get(CONF_AVERAGE_SENSOR_DISPLAY, DEFAULT_AVERAGE_SENSOR_DISPLAY)
|
||||||
|
use_median = average_display == "median"
|
||||||
|
|
||||||
|
hourly_data = []
|
||||||
|
|
||||||
|
# Iterate through all intervals, only process those at :00
|
||||||
|
for i, interval in enumerate(intervals):
|
||||||
|
start_time = interval.get("startsAt")
|
||||||
|
|
||||||
|
if not start_time:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Check if this is the start of an hour (:00)
|
||||||
|
if start_time.minute != 0:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Collect 5-interval rolling window: -2, -1, 0, +1, +2
|
||||||
|
window_prices: list[float] = []
|
||||||
|
window_intervals: list[dict] = []
|
||||||
|
|
||||||
|
for offset in range(-2, 3): # -2, -1, 0, +1, +2
|
||||||
|
target_idx = i + offset
|
||||||
|
if 0 <= target_idx < len(intervals):
|
||||||
|
target_interval = intervals[target_idx]
|
||||||
|
price = target_interval.get("total")
|
||||||
|
if price is not None:
|
||||||
|
window_prices.append(price)
|
||||||
|
window_intervals.append(target_interval)
|
||||||
|
|
||||||
|
# Calculate aggregated price based on user preference
|
||||||
|
if window_prices:
|
||||||
|
aggregated_price = calculate_median(window_prices) if use_median else calculate_mean(window_prices)
|
||||||
|
|
||||||
|
if aggregated_price is None:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Build data point with original field names
|
||||||
|
data_point: dict[str, Any] = {
|
||||||
|
"startsAt": start_time,
|
||||||
|
"total": aggregated_price,
|
||||||
|
}
|
||||||
|
|
||||||
|
# Add aggregated level
|
||||||
|
if window_intervals:
|
||||||
|
aggregated_level = aggregate_level_data(window_intervals)
|
||||||
|
if aggregated_level:
|
||||||
|
data_point["level"] = aggregated_level.upper()
|
||||||
|
|
||||||
|
# Add aggregated rating_level
|
||||||
|
if window_intervals:
|
||||||
|
aggregated_rating = aggregate_rating_data(window_intervals, threshold_low, threshold_high)
|
||||||
|
if aggregated_rating:
|
||||||
|
data_point["rating_level"] = aggregated_rating.upper()
|
||||||
|
|
||||||
|
hourly_data.append(data_point)
|
||||||
|
|
||||||
|
return hourly_data
|
||||||
|
|
||||||
|
|
||||||
def aggregate_hourly_exact( # noqa: PLR0913, PLR0912, PLR0915
|
def aggregate_hourly_exact( # noqa: PLR0913, PLR0912, PLR0915
|
||||||
intervals: list[dict],
|
intervals: list[dict],
|
||||||
start_time_field: str,
|
start_time_field: str,
|
||||||
|
|
|
||||||
|
|
@ -63,7 +63,9 @@ APEXCHARTS_SERVICE_SCHEMA = vol.Schema(
|
||||||
vol.Required(ATTR_ENTRY_ID): cv.string,
|
vol.Required(ATTR_ENTRY_ID): cv.string,
|
||||||
vol.Optional("day"): vol.In(["yesterday", "today", "tomorrow", "rolling_window", "rolling_window_autozoom"]),
|
vol.Optional("day"): vol.In(["yesterday", "today", "tomorrow", "rolling_window", "rolling_window_autozoom"]),
|
||||||
vol.Optional("level_type", default="rating_level"): vol.In(["rating_level", "level"]),
|
vol.Optional("level_type", default="rating_level"): vol.In(["rating_level", "level"]),
|
||||||
|
vol.Optional("resolution", default="interval"): vol.In(["interval", "hourly"]),
|
||||||
vol.Optional("highlight_best_price", default=True): cv.boolean,
|
vol.Optional("highlight_best_price", default=True): cv.boolean,
|
||||||
|
vol.Optional("highlight_peak_price", default=False): cv.boolean,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -295,7 +297,9 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
||||||
|
|
||||||
day = call.data.get("day") # Can be None (rolling window mode)
|
day = call.data.get("day") # Can be None (rolling window mode)
|
||||||
level_type = call.data.get("level_type", "rating_level")
|
level_type = call.data.get("level_type", "rating_level")
|
||||||
|
resolution = call.data.get("resolution", "interval")
|
||||||
highlight_best_price = call.data.get("highlight_best_price", True)
|
highlight_best_price = call.data.get("highlight_best_price", True)
|
||||||
|
highlight_peak_price = call.data.get("highlight_peak_price", False)
|
||||||
|
|
||||||
# Get user's language from hass config
|
# Get user's language from hass config
|
||||||
user_language = hass.config.language or "en"
|
user_language = hass.config.language or "en"
|
||||||
|
|
@ -310,6 +314,10 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
||||||
use_subunit = display_mode == DISPLAY_MODE_SUBUNIT
|
use_subunit = display_mode == DISPLAY_MODE_SUBUNIT
|
||||||
price_unit = get_display_unit_string(config_entry, currency)
|
price_unit = get_display_unit_string(config_entry, currency)
|
||||||
|
|
||||||
|
# Add average symbol suffix for hourly resolution (suffix to avoid confusion with øre/öre)
|
||||||
|
if resolution == "hourly":
|
||||||
|
price_unit = f"{price_unit} (Ø)"
|
||||||
|
|
||||||
# Get entity registry for mapping
|
# Get entity registry for mapping
|
||||||
entity_registry = async_get_entity_registry(hass)
|
entity_registry = async_get_entity_registry(hass)
|
||||||
|
|
||||||
|
|
@ -333,8 +341,20 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
||||||
]
|
]
|
||||||
series = []
|
series = []
|
||||||
|
|
||||||
# Get translated name for best price periods (needed for layer)
|
# Get translated names for overlays (best/peak)
|
||||||
best_price_name = get_translation(["apexcharts", "best_price_period_name"], user_language) or "Best Price Period"
|
# Include triangle icons for visual distinction in legend
|
||||||
|
# ▼ (U+25BC) = down/minimum = best price periods
|
||||||
|
# ▲ (U+25B2) = up/maximum = peak price periods
|
||||||
|
best_price_name = "▼ " + (
|
||||||
|
get_translation(["apexcharts", "best_price_period_name"], user_language) or "Best Price Period"
|
||||||
|
)
|
||||||
|
peak_price_name = "▲ " + (
|
||||||
|
get_translation(["apexcharts", "peak_price_period_name"], user_language) or "Peak Price Period"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Track overlays added for tooltip index calculation later
|
||||||
|
best_overlay_added = False
|
||||||
|
peak_overlay_added = False
|
||||||
|
|
||||||
# Add best price period highlight overlay FIRST (so it renders behind all other series)
|
# Add best price period highlight overlay FIRST (so it renders behind all other series)
|
||||||
if highlight_best_price and entity_map:
|
if highlight_best_price and entity_map:
|
||||||
|
|
@ -354,7 +374,7 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
||||||
f"service: 'get_chartdata', "
|
f"service: 'get_chartdata', "
|
||||||
f"return_response: true, "
|
f"return_response: true, "
|
||||||
f"service_data: {{ entry_id: '{entry_id}', {day_param}"
|
f"service_data: {{ entry_id: '{entry_id}', {day_param}"
|
||||||
f"period_filter: 'best_price', "
|
f"period_filter: 'best_price', resolution: '{resolution}', "
|
||||||
f"output_format: 'array_of_arrays', insert_nulls: 'segments', subunit_currency: {subunit_param} }} }}); "
|
f"output_format: 'array_of_arrays', insert_nulls: 'segments', subunit_currency: {subunit_param} }} }}); "
|
||||||
f"const originalData = response.response.data; "
|
f"const originalData = response.response.data; "
|
||||||
f"return originalData.map((point, i) => {{ "
|
f"return originalData.map((point, i) => {{ "
|
||||||
|
|
@ -367,6 +387,11 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
||||||
# Use first entity from entity_map (reuse existing entity to avoid extra header entries)
|
# Use first entity from entity_map (reuse existing entity to avoid extra header entries)
|
||||||
best_price_entity = next(iter(entity_map.values()))
|
best_price_entity = next(iter(entity_map.values()))
|
||||||
|
|
||||||
|
# Legend toggle logic:
|
||||||
|
# - Only best price selected: no legend (in_legend: False)
|
||||||
|
# - Both selected: show in legend, toggleable (in_legend: True)
|
||||||
|
best_price_in_legend = highlight_peak_price # Only show in legend if peak is also enabled
|
||||||
|
|
||||||
series.append(
|
series.append(
|
||||||
{
|
{
|
||||||
"entity": best_price_entity,
|
"entity": best_price_entity,
|
||||||
|
|
@ -374,11 +399,56 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
||||||
"type": "area",
|
"type": "area",
|
||||||
"color": "rgba(46, 204, 113, 0.05)", # Ultra-subtle green overlay (barely visible)
|
"color": "rgba(46, 204, 113, 0.05)", # Ultra-subtle green overlay (barely visible)
|
||||||
"yaxis_id": "highlight", # Use separate Y-axis (0-1) for full-height overlay
|
"yaxis_id": "highlight", # Use separate Y-axis (0-1) for full-height overlay
|
||||||
"show": {"legend_value": False, "in_header": False, "in_legend": False},
|
"show": {"legend_value": False, "in_header": False, "in_legend": best_price_in_legend},
|
||||||
"data_generator": best_price_generator,
|
"data_generator": best_price_generator,
|
||||||
"stroke_width": 0,
|
"stroke_width": 0,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
best_overlay_added = True
|
||||||
|
|
||||||
|
# Add peak price period highlight overlay (renders behind series as well)
|
||||||
|
if highlight_peak_price and entity_map:
|
||||||
|
# Conditionally include day parameter (omit for rolling window mode)
|
||||||
|
day_param = "" if day in ("rolling_window", "rolling_window_autozoom", None) else f"day: ['{day}'], "
|
||||||
|
subunit_param = "true" if use_subunit else "false"
|
||||||
|
peak_price_generator = (
|
||||||
|
f"const response = await hass.callWS({{ "
|
||||||
|
f"type: 'call_service', "
|
||||||
|
f"domain: 'tibber_prices', "
|
||||||
|
f"service: 'get_chartdata', "
|
||||||
|
f"return_response: true, "
|
||||||
|
f"service_data: {{ entry_id: '{entry_id}', {day_param}"
|
||||||
|
f"period_filter: 'peak_price', resolution: '{resolution}', "
|
||||||
|
f"output_format: 'array_of_arrays', insert_nulls: 'segments', subunit_currency: {subunit_param} }} }}); "
|
||||||
|
f"const originalData = response.response.data; "
|
||||||
|
f"return originalData.map((point, i) => {{ "
|
||||||
|
f"const result = [point[0], point[1] === null ? null : 1]; "
|
||||||
|
f"result.originalPrice = point[1]; "
|
||||||
|
f"return result; "
|
||||||
|
f"}});"
|
||||||
|
)
|
||||||
|
|
||||||
|
peak_price_entity = next(iter(entity_map.values()))
|
||||||
|
|
||||||
|
# Peak price: always show in legend when enabled (for toggle), start hidden by default
|
||||||
|
series.append(
|
||||||
|
{
|
||||||
|
"entity": peak_price_entity,
|
||||||
|
"name": peak_price_name,
|
||||||
|
"type": "area",
|
||||||
|
"color": "rgba(231, 76, 60, 0.06)", # Subtle red overlay for peak price
|
||||||
|
"yaxis_id": "highlight",
|
||||||
|
"show": {
|
||||||
|
"legend_value": False,
|
||||||
|
"in_header": False,
|
||||||
|
"in_legend": True,
|
||||||
|
"hidden_by_default": True, # Start hidden, user can toggle via legend
|
||||||
|
},
|
||||||
|
"data_generator": peak_price_generator,
|
||||||
|
"stroke_width": 0,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
peak_overlay_added = True
|
||||||
|
|
||||||
# Only create series for levels that have a matching entity (filter out missing levels)
|
# Only create series for levels that have a matching entity (filter out missing levels)
|
||||||
for level_key, color in series_levels:
|
for level_key, color in series_levels:
|
||||||
|
|
@ -409,7 +479,7 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
||||||
f"domain: 'tibber_prices', "
|
f"domain: 'tibber_prices', "
|
||||||
f"service: 'get_chartdata', "
|
f"service: 'get_chartdata', "
|
||||||
f"return_response: true, "
|
f"return_response: true, "
|
||||||
f"service_data: {{ entry_id: '{entry_id}', {day_param}{filter_param}, "
|
f"service_data: {{ entry_id: '{entry_id}', {day_param}{filter_param}, resolution: '{resolution}', "
|
||||||
f"output_format: 'array_of_arrays', insert_nulls: 'segments', subunit_currency: {subunit_param}, "
|
f"output_format: 'array_of_arrays', insert_nulls: 'segments', subunit_currency: {subunit_param}, "
|
||||||
f"connect_segments: true }} }}); "
|
f"connect_segments: true }} }}); "
|
||||||
f"return response.response.data;"
|
f"return response.response.data;"
|
||||||
|
|
@ -422,7 +492,7 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
||||||
f"domain: 'tibber_prices', "
|
f"domain: 'tibber_prices', "
|
||||||
f"service: 'get_chartdata', "
|
f"service: 'get_chartdata', "
|
||||||
f"return_response: true, "
|
f"return_response: true, "
|
||||||
f"service_data: {{ entry_id: '{entry_id}', {day_param}{filter_param}, "
|
f"service_data: {{ entry_id: '{entry_id}', {day_param}{filter_param}, resolution: '{resolution}', "
|
||||||
f"output_format: 'array_of_arrays', insert_nulls: 'segments', subunit_currency: {subunit_param}, "
|
f"output_format: 'array_of_arrays', insert_nulls: 'segments', subunit_currency: {subunit_param}, "
|
||||||
f"connect_segments: true }} }}); "
|
f"connect_segments: true }} }}); "
|
||||||
f"return response.response.data;"
|
f"return response.response.data;"
|
||||||
|
|
@ -431,10 +501,13 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
||||||
# rating_level LOW/HIGH: Show raw state in header (entity state = min/max price of day)
|
# rating_level LOW/HIGH: Show raw state in header (entity state = min/max price of day)
|
||||||
# rating_level NORMAL: Hide from header (not meaningful as extrema)
|
# rating_level NORMAL: Hide from header (not meaningful as extrema)
|
||||||
# level (VERY_CHEAP/CHEAP/etc): Hide from header (entity state is aggregated value)
|
# level (VERY_CHEAP/CHEAP/etc): Hide from header (entity state is aggregated value)
|
||||||
|
# Price level series are hidden from legend only when best/peak overlays are enabled
|
||||||
|
# (to keep legend clean for toggle-only items)
|
||||||
|
hide_from_legend = highlight_best_price or highlight_peak_price
|
||||||
if level_type == "rating_level" and level_key in (PRICE_RATING_LOW, PRICE_RATING_HIGH):
|
if level_type == "rating_level" and level_key in (PRICE_RATING_LOW, PRICE_RATING_HIGH):
|
||||||
show_config = {"legend_value": False, "in_header": "raw"}
|
show_config = {"legend_value": False, "in_header": "raw", "in_legend": not hide_from_legend}
|
||||||
else:
|
else:
|
||||||
show_config = {"legend_value": False, "in_header": False}
|
show_config = {"legend_value": False, "in_header": False, "in_legend": not hide_from_legend}
|
||||||
|
|
||||||
series.append(
|
series.append(
|
||||||
{
|
{
|
||||||
|
|
@ -463,6 +536,11 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
||||||
day_translated = get_translation(["selector", "day", "options", day], user_language) or day.capitalize()
|
day_translated = get_translation(["selector", "day", "options", day], user_language) or day.capitalize()
|
||||||
title = f"{title} - {day_translated}"
|
title = f"{title} - {day_translated}"
|
||||||
|
|
||||||
|
# Add hourly suffix to title when using hourly resolution
|
||||||
|
if resolution == "hourly":
|
||||||
|
hourly_suffix = get_translation(["apexcharts", "hourly_suffix"], user_language) or "(Ø hourly)"
|
||||||
|
title = f"{title} {hourly_suffix}"
|
||||||
|
|
||||||
# Configure span based on selected day
|
# Configure span based on selected day
|
||||||
# For rolling window modes, use config-template-card for dynamic config
|
# For rolling window modes, use config-template-card for dynamic config
|
||||||
if day == "yesterday":
|
if day == "yesterday":
|
||||||
|
|
@ -522,10 +600,23 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"dataLabels": {"enabled": False},
|
"dataLabels": {"enabled": False},
|
||||||
|
# Legend is shown only when peak price is enabled (for toggling visibility)
|
||||||
|
# - Only best price: no legend needed
|
||||||
|
# - Peak price (with or without best): show legend for toggle
|
||||||
"legend": {
|
"legend": {
|
||||||
"show": False,
|
"show": highlight_peak_price,
|
||||||
"position": "bottom",
|
"position": "bottom",
|
||||||
"horizontalAlign": "center",
|
"horizontalAlign": "center",
|
||||||
|
# Custom markers only when overlays are enabled (hide color dots, use text icons)
|
||||||
|
# Without overlays: use default markers so user can enable legend with just show: true
|
||||||
|
**(
|
||||||
|
{
|
||||||
|
"markers": {"size": 0},
|
||||||
|
"itemMargin": {"horizontal": 15},
|
||||||
|
}
|
||||||
|
if highlight_peak_price
|
||||||
|
else {}
|
||||||
|
),
|
||||||
},
|
},
|
||||||
"grid": {
|
"grid": {
|
||||||
"show": True,
|
"show": True,
|
||||||
|
|
@ -546,7 +637,9 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
||||||
},
|
},
|
||||||
"tooltip": {
|
"tooltip": {
|
||||||
"enabled": True,
|
"enabled": True,
|
||||||
"enabledOnSeries": [1, 2, 3, 4, 5] if highlight_best_price else [0, 1, 2, 3, 4],
|
"shared": True, # Combine tooltips from all series at same x-value
|
||||||
|
# enabledOnSeries will be set dynamically below based on overlays
|
||||||
|
"enabledOnSeries": [],
|
||||||
"marker": {
|
"marker": {
|
||||||
"show": False,
|
"show": False,
|
||||||
},
|
},
|
||||||
|
|
@ -566,6 +659,10 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
||||||
"max": 1,
|
"max": 1,
|
||||||
"show": False, # Hide this axis (only for highlight overlay)
|
"show": False, # Hide this axis (only for highlight overlay)
|
||||||
"opposite": True,
|
"opposite": True,
|
||||||
|
"apex_config": {
|
||||||
|
"forceNiceScale": True,
|
||||||
|
"tickAmount": 4,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
"now": (
|
"now": (
|
||||||
|
|
@ -579,6 +676,15 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
||||||
"series": series,
|
"series": series,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Dynamically set tooltip enabledOnSeries to exclude overlay indices
|
||||||
|
overlay_count = (1 if best_overlay_added else 0) + (1 if peak_overlay_added else 0)
|
||||||
|
result["apex_config"]["tooltip"]["enabledOnSeries"] = list(range(overlay_count, len(series)))
|
||||||
|
|
||||||
|
# Enable hidden_by_default experimental feature when peak price is enabled
|
||||||
|
# This allows peak price overlay to start hidden but be toggled via legend click
|
||||||
|
if highlight_peak_price:
|
||||||
|
result["experimental"] = {"hidden_by_default": True}
|
||||||
|
|
||||||
# For rolling window mode and today_tomorrow, wrap in config-template-card for dynamic config
|
# For rolling window mode and today_tomorrow, wrap in config-template-card for dynamic config
|
||||||
if use_template:
|
if use_template:
|
||||||
# Find tomorrow_data_available binary sensor
|
# Find tomorrow_data_available binary sensor
|
||||||
|
|
@ -694,6 +800,8 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
||||||
"title": {"text": price_unit},
|
"title": {"text": price_unit},
|
||||||
"decimalsInFloat": 0 if use_subunit else 1,
|
"decimalsInFloat": 0 if use_subunit else 1,
|
||||||
"forceNiceScale": True,
|
"forceNiceScale": True,
|
||||||
|
"showAlways": True,
|
||||||
|
"tickAmount": 4,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -712,6 +820,8 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
||||||
"title": {"text": price_unit},
|
"title": {"text": price_unit},
|
||||||
"decimalsInFloat": 0 if use_subunit else 1,
|
"decimalsInFloat": 0 if use_subunit else 1,
|
||||||
"forceNiceScale": True,
|
"forceNiceScale": True,
|
||||||
|
"showAlways": True,
|
||||||
|
"tickAmount": 4,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -742,6 +852,10 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
||||||
"max": 1,
|
"max": 1,
|
||||||
"show": False,
|
"show": False,
|
||||||
"opposite": True,
|
"opposite": True,
|
||||||
|
"apex_config": {
|
||||||
|
"forceNiceScale": True,
|
||||||
|
"tickAmount": 4,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
"apex_config": {
|
"apex_config": {
|
||||||
|
|
@ -851,6 +965,8 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
||||||
"title": {"text": price_unit},
|
"title": {"text": price_unit},
|
||||||
"decimalsInFloat": 0 if use_subunit else 1,
|
"decimalsInFloat": 0 if use_subunit else 1,
|
||||||
"forceNiceScale": True,
|
"forceNiceScale": True,
|
||||||
|
"showAlways": True,
|
||||||
|
"tickAmount": 4,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -869,6 +985,8 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
||||||
"title": {"text": price_unit},
|
"title": {"text": price_unit},
|
||||||
"decimalsInFloat": 0 if use_subunit else 1,
|
"decimalsInFloat": 0 if use_subunit else 1,
|
||||||
"forceNiceScale": True,
|
"forceNiceScale": True,
|
||||||
|
"showAlways": True,
|
||||||
|
"tickAmount": 4,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -901,6 +1019,10 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
||||||
"max": 1,
|
"max": 1,
|
||||||
"show": False,
|
"show": False,
|
||||||
"opposite": True,
|
"opposite": True,
|
||||||
|
"apex_config": {
|
||||||
|
"forceNiceScale": True,
|
||||||
|
"tickAmount": 4,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
"apex_config": {
|
"apex_config": {
|
||||||
|
|
|
||||||
|
|
@ -36,11 +36,13 @@ from custom_components.tibber_prices.const import (
|
||||||
DOMAIN,
|
DOMAIN,
|
||||||
PRICE_LEVEL_CHEAP,
|
PRICE_LEVEL_CHEAP,
|
||||||
PRICE_LEVEL_EXPENSIVE,
|
PRICE_LEVEL_EXPENSIVE,
|
||||||
|
PRICE_LEVEL_MAPPING,
|
||||||
PRICE_LEVEL_NORMAL,
|
PRICE_LEVEL_NORMAL,
|
||||||
PRICE_LEVEL_VERY_CHEAP,
|
PRICE_LEVEL_VERY_CHEAP,
|
||||||
PRICE_LEVEL_VERY_EXPENSIVE,
|
PRICE_LEVEL_VERY_EXPENSIVE,
|
||||||
PRICE_RATING_HIGH,
|
PRICE_RATING_HIGH,
|
||||||
PRICE_RATING_LOW,
|
PRICE_RATING_LOW,
|
||||||
|
PRICE_RATING_MAPPING,
|
||||||
PRICE_RATING_NORMAL,
|
PRICE_RATING_NORMAL,
|
||||||
format_price_unit_base,
|
format_price_unit_base,
|
||||||
format_price_unit_subunit,
|
format_price_unit_subunit,
|
||||||
|
|
@ -52,13 +54,44 @@ from custom_components.tibber_prices.coordinator.helpers import (
|
||||||
)
|
)
|
||||||
from homeassistant.exceptions import ServiceValidationError
|
from homeassistant.exceptions import ServiceValidationError
|
||||||
|
|
||||||
from .formatters import aggregate_hourly_exact, get_period_data, normalize_level_filter, normalize_rating_level_filter
|
from .formatters import (
|
||||||
|
aggregate_to_hourly,
|
||||||
|
get_period_data,
|
||||||
|
normalize_level_filter,
|
||||||
|
normalize_rating_level_filter,
|
||||||
|
)
|
||||||
from .helpers import get_entry_and_data, has_tomorrow_data
|
from .helpers import get_entry_and_data, has_tomorrow_data
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from homeassistant.core import ServiceCall
|
from homeassistant.core import ServiceCall
|
||||||
|
|
||||||
|
|
||||||
|
def _is_transition_to_more_expensive(
|
||||||
|
current_value: str | None,
|
||||||
|
next_value: str | None,
|
||||||
|
*,
|
||||||
|
use_rating: bool = False,
|
||||||
|
) -> bool:
|
||||||
|
"""
|
||||||
|
Check if transition from current to next level/rating is to a more expensive segment.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
current_value: Current level or rating value
|
||||||
|
next_value: Next level or rating value
|
||||||
|
use_rating: If True, use rating hierarchy; if False, use level hierarchy
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if transitioning to a more expensive segment
|
||||||
|
|
||||||
|
"""
|
||||||
|
hierarchy = PRICE_RATING_MAPPING if use_rating else PRICE_LEVEL_MAPPING
|
||||||
|
|
||||||
|
current_rank = hierarchy.get(current_value, 0) if current_value else 0
|
||||||
|
next_rank = hierarchy.get(next_value, 0) if next_value else 0
|
||||||
|
|
||||||
|
return next_rank > current_rank
|
||||||
|
|
||||||
|
|
||||||
def _calculate_metadata( # noqa: PLR0912, PLR0913, PLR0915
|
def _calculate_metadata( # noqa: PLR0912, PLR0913, PLR0915
|
||||||
chart_data: list[dict[str, Any]],
|
chart_data: list[dict[str, Any]],
|
||||||
price_field: str,
|
price_field: str,
|
||||||
|
|
@ -195,16 +228,29 @@ def _calculate_metadata( # noqa: PLR0912, PLR0913, PLR0915
|
||||||
# Determine interval duration in minutes based on resolution
|
# Determine interval duration in minutes based on resolution
|
||||||
interval_duration_minutes = 15 if resolution == "interval" else 60
|
interval_duration_minutes = 15 if resolution == "interval" else 60
|
||||||
|
|
||||||
# Calculate suggested yaxis bounds
|
# Calculate suggested yaxis bounds with proportional padding
|
||||||
# For subunit currency (ct, øre): integer values (floor/ceil)
|
# Goal: Same visual "airiness" regardless of price range
|
||||||
# For base currency (€, kr): 2 decimal places precision
|
# Strategy: Add padding proportional to data range (min/max spread)
|
||||||
if subunit_currency:
|
if combined_stats:
|
||||||
yaxis_min = math.floor(combined_stats["min"]) - 1 if combined_stats else 0
|
data_range = combined_stats["max"] - combined_stats["min"]
|
||||||
yaxis_max = math.ceil(combined_stats["max"]) + 1 if combined_stats else 100
|
|
||||||
|
# Calculate padding: ~8% of data range below min, ~15% above max
|
||||||
|
# These percentages match the visual spacing seen in well-scaled charts
|
||||||
|
padding_below = data_range * 0.08
|
||||||
|
padding_above = data_range * 0.15
|
||||||
|
|
||||||
|
if subunit_currency:
|
||||||
|
# Subunit (ct, øre): round to 1 decimal for cleaner axis labels
|
||||||
|
yaxis_min = round(combined_stats["min"] - padding_below, 1)
|
||||||
|
yaxis_max = round(combined_stats["max"] + padding_above, 1)
|
||||||
|
else:
|
||||||
|
# Base currency (€, kr): round to 2 decimals
|
||||||
|
yaxis_min = round(combined_stats["min"] - padding_below, 2)
|
||||||
|
yaxis_max = round(combined_stats["max"] + padding_above, 2)
|
||||||
else:
|
else:
|
||||||
# Base currency: round to 2 decimal places with padding
|
# Fallback for empty data
|
||||||
yaxis_min = round(math.floor(combined_stats["min"] * 100) / 100 - 0.01, 2) if combined_stats else 0
|
yaxis_min = 0
|
||||||
yaxis_max = round(math.ceil(combined_stats["max"] * 100) / 100 + 0.01, 2) if combined_stats else 1.0
|
yaxis_max = 100 if subunit_currency else 1.0
|
||||||
|
|
||||||
return {
|
return {
|
||||||
"currency": currency_obj,
|
"currency": currency_obj,
|
||||||
|
|
@ -455,19 +501,26 @@ async def handle_chartdata(call: ServiceCall) -> dict[str, Any]: # noqa: PLR091
|
||||||
all_timestamps = {interval["startsAt"] for interval in day_intervals if interval.get("startsAt")}
|
all_timestamps = {interval["startsAt"] for interval in day_intervals if interval.get("startsAt")}
|
||||||
all_timestamps = sorted(all_timestamps)
|
all_timestamps = sorted(all_timestamps)
|
||||||
|
|
||||||
# Calculate average if requested
|
# Calculate average if requested (per day for average_field)
|
||||||
day_averages = {}
|
# Also build a mapping from date -> day_key for later lookup
|
||||||
if include_average:
|
day_averages: dict[str, float] = {}
|
||||||
for day in days:
|
date_to_day_key: dict[Any, str] = {} # Maps date object to "yesterday"/"today"/"tomorrow"
|
||||||
# Use helper to get intervals for this day
|
|
||||||
# Build minimal coordinator_data for single day query
|
|
||||||
# Map day key to offset: yesterday=-1, today=0, tomorrow=1
|
|
||||||
day_offset = {"yesterday": -1, "today": 0, "tomorrow": 1}[day]
|
|
||||||
day_intervals = get_intervals_for_day_offsets(coordinator.data, [day_offset])
|
|
||||||
|
|
||||||
# Collect prices from intervals
|
for day in days:
|
||||||
|
# Use helper to get intervals for this day
|
||||||
|
# Map day key to offset: yesterday=-1, today=0, tomorrow=1
|
||||||
|
day_offset = {"yesterday": -1, "today": 0, "tomorrow": 1}[day]
|
||||||
|
day_intervals = get_intervals_for_day_offsets(coordinator.data, [day_offset])
|
||||||
|
|
||||||
|
# Build date -> day_key mapping from actual interval data
|
||||||
|
for interval in day_intervals:
|
||||||
|
start_time = interval.get("startsAt")
|
||||||
|
if start_time and hasattr(start_time, "date"):
|
||||||
|
date_to_day_key[start_time.date()] = day
|
||||||
|
|
||||||
|
# Calculate average if requested
|
||||||
|
if include_average:
|
||||||
prices = [p["total"] for p in day_intervals if p.get("total") is not None]
|
prices = [p["total"] for p in day_intervals if p.get("total") is not None]
|
||||||
|
|
||||||
if prices:
|
if prices:
|
||||||
avg = sum(prices) / len(prices)
|
avg = sum(prices) / len(prices)
|
||||||
# Apply same transformations as to regular prices
|
# Apply same transformations as to regular prices
|
||||||
|
|
@ -476,134 +529,222 @@ async def handle_chartdata(call: ServiceCall) -> dict[str, Any]: # noqa: PLR091
|
||||||
avg = round(avg, round_decimals)
|
avg = round(avg, round_decimals)
|
||||||
day_averages[day] = avg
|
day_averages[day] = avg
|
||||||
|
|
||||||
for day in days:
|
# Collect ALL intervals for the selected days as one continuous list
|
||||||
# Use helper to get intervals for this day
|
# This simplifies processing - no special midnight handling needed
|
||||||
# Map day key to offset: yesterday=-1, today=0, tomorrow=1
|
day_offsets = [{"yesterday": -1, "today": 0, "tomorrow": 1}[day] for day in days]
|
||||||
day_offset = {"yesterday": -1, "today": 0, "tomorrow": 1}[day]
|
all_prices = get_intervals_for_day_offsets(coordinator.data, day_offsets)
|
||||||
day_prices = get_intervals_for_day_offsets(coordinator.data, [day_offset])
|
|
||||||
|
|
||||||
if resolution == "interval":
|
# For hourly resolution, aggregate BEFORE processing
|
||||||
# Original 15-minute intervals
|
# This keeps the same data format (startsAt, total, level, rating_level)
|
||||||
if insert_nulls == "all" and (level_filter or rating_level_filter):
|
# so all subsequent code (filters, insert_nulls, etc.) works unchanged
|
||||||
# Mode 'all': Insert NULL for all timestamps where filter doesn't match
|
if resolution == "hourly":
|
||||||
# Build a map of timestamp -> interval for quick lookup
|
all_prices = aggregate_to_hourly(
|
||||||
interval_map = {
|
all_prices,
|
||||||
interval.get("startsAt"): interval for interval in day_prices if interval.get("startsAt")
|
coordinator=coordinator,
|
||||||
|
threshold_low=threshold_low,
|
||||||
|
threshold_high=threshold_high,
|
||||||
|
)
|
||||||
|
# Also update all_timestamps for insert_nulls='all' mode
|
||||||
|
all_timestamps = sorted({interval["startsAt"] for interval in all_prices if interval.get("startsAt")})
|
||||||
|
|
||||||
|
# Helper to get day key from interval timestamp for average lookup
|
||||||
|
def _get_day_key_for_interval(interval_start: Any) -> str | None:
|
||||||
|
"""Determine which day key (yesterday/today/tomorrow) an interval belongs to."""
|
||||||
|
if not interval_start or not hasattr(interval_start, "date"):
|
||||||
|
return None
|
||||||
|
# Use pre-built mapping from actual interval data (TimeService-compatible)
|
||||||
|
return date_to_day_key.get(interval_start.date())
|
||||||
|
|
||||||
|
# Process price data - same logic handles both interval and hourly resolution
|
||||||
|
# (hourly data was already aggregated above, but has the same format)
|
||||||
|
if resolution in ("interval", "hourly"):
|
||||||
|
if insert_nulls == "all" and (level_filter or rating_level_filter):
|
||||||
|
# Mode 'all': Insert NULL for all timestamps where filter doesn't match
|
||||||
|
# Build a map of timestamp -> interval for quick lookup
|
||||||
|
interval_map = {interval.get("startsAt"): interval for interval in all_prices if interval.get("startsAt")}
|
||||||
|
|
||||||
|
# Process all timestamps, filling gaps with NULL
|
||||||
|
for start_time in all_timestamps:
|
||||||
|
interval = interval_map.get(start_time)
|
||||||
|
|
||||||
|
if interval is None:
|
||||||
|
# No data for this timestamp - skip entirely
|
||||||
|
continue
|
||||||
|
|
||||||
|
price = interval.get("total")
|
||||||
|
if price is None:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Check if this interval matches the filter
|
||||||
|
matches_filter = False
|
||||||
|
if level_filter and "level" in interval:
|
||||||
|
matches_filter = interval["level"] in level_filter
|
||||||
|
elif rating_level_filter and "rating_level" in interval:
|
||||||
|
matches_filter = interval["rating_level"] in rating_level_filter
|
||||||
|
|
||||||
|
# If filter is set but doesn't match, insert NULL price
|
||||||
|
if not matches_filter:
|
||||||
|
price = None
|
||||||
|
elif price is not None:
|
||||||
|
# Convert to subunit currency (cents/øre) if requested
|
||||||
|
price = round(price * 100, 2) if subunit_currency else round(price, 4)
|
||||||
|
# Apply custom rounding if specified
|
||||||
|
if round_decimals is not None:
|
||||||
|
price = round(price, round_decimals)
|
||||||
|
|
||||||
|
data_point = {
|
||||||
|
start_time_field: start_time.isoformat() if hasattr(start_time, "isoformat") else start_time,
|
||||||
|
price_field: price,
|
||||||
}
|
}
|
||||||
|
|
||||||
# Process all timestamps, filling gaps with NULL
|
# Add level if requested (only when price is not NULL)
|
||||||
for start_time in all_timestamps:
|
if include_level and "level" in interval and price is not None:
|
||||||
interval = interval_map.get(start_time)
|
data_point[level_field] = interval["level"]
|
||||||
|
|
||||||
if interval is None:
|
# Add rating_level if requested (only when price is not NULL)
|
||||||
# No data for this timestamp - skip entirely
|
if include_rating_level and "rating_level" in interval and price is not None:
|
||||||
continue
|
data_point[rating_level_field] = interval["rating_level"]
|
||||||
|
|
||||||
price = interval.get("total")
|
# Add average if requested
|
||||||
if price is None:
|
day_key = _get_day_key_for_interval(start_time)
|
||||||
continue
|
if include_average and day_key and day_key in day_averages:
|
||||||
|
data_point[average_field] = day_averages[day_key]
|
||||||
|
|
||||||
# Check if this interval matches the filter
|
chart_data.append(data_point)
|
||||||
matches_filter = False
|
|
||||||
if level_filter and "level" in interval:
|
|
||||||
matches_filter = interval["level"] in level_filter
|
|
||||||
elif rating_level_filter and "rating_level" in interval:
|
|
||||||
matches_filter = interval["rating_level"] in rating_level_filter
|
|
||||||
|
|
||||||
# If filter is set but doesn't match, insert NULL price
|
elif insert_nulls == "segments" and (level_filter or rating_level_filter):
|
||||||
if not matches_filter:
|
# Mode 'segments': Add NULL points at segment boundaries for clean gaps
|
||||||
price = None
|
# Process ALL intervals as one continuous list - no special midnight handling needed
|
||||||
elif price is not None:
|
filter_field = "rating_level" if rating_level_filter else "level"
|
||||||
# Convert to subunit currency (cents/øre) if requested
|
filter_values = rating_level_filter if rating_level_filter else level_filter
|
||||||
price = round(price * 100, 2) if subunit_currency else round(price, 4)
|
use_rating = rating_level_filter is not None
|
||||||
# Apply custom rounding if specified
|
|
||||||
if round_decimals is not None:
|
|
||||||
price = round(price, round_decimals)
|
|
||||||
|
|
||||||
|
for i in range(len(all_prices) - 1):
|
||||||
|
interval = all_prices[i]
|
||||||
|
next_interval = all_prices[i + 1]
|
||||||
|
|
||||||
|
start_time = interval.get("startsAt")
|
||||||
|
price = interval.get("total")
|
||||||
|
next_price = next_interval.get("total")
|
||||||
|
next_start_time = next_interval.get("startsAt")
|
||||||
|
|
||||||
|
if start_time is None or price is None:
|
||||||
|
continue
|
||||||
|
|
||||||
|
interval_value = interval.get(filter_field)
|
||||||
|
next_value = next_interval.get(filter_field)
|
||||||
|
prev_value = all_prices[i - 1].get(filter_field) if i > 0 else None
|
||||||
|
prev_price = all_prices[i - 1].get("total") if i > 0 else None
|
||||||
|
|
||||||
|
# Check if current interval matches filter
|
||||||
|
if interval_value in filter_values: # type: ignore[operator]
|
||||||
|
# Convert price
|
||||||
|
converted_price = round(price * 100, 2) if subunit_currency else round(price, 4)
|
||||||
|
if round_decimals is not None:
|
||||||
|
converted_price = round(converted_price, round_decimals)
|
||||||
|
|
||||||
|
# Check if this is the START of a new segment (previous interval had different level)
|
||||||
|
# and the transition was from a CHEAPER level (price increase)
|
||||||
|
is_segment_start = prev_value != interval_value and prev_value not in filter_values # type: ignore[operator]
|
||||||
|
is_from_cheaper = (
|
||||||
|
_is_transition_to_more_expensive(prev_value, interval_value, use_rating=use_rating)
|
||||||
|
if prev_value
|
||||||
|
else False
|
||||||
|
)
|
||||||
|
|
||||||
|
# Add current point FIRST (tooltip will show here - at the actual price!)
|
||||||
data_point = {
|
data_point = {
|
||||||
start_time_field: start_time.isoformat() if hasattr(start_time, "isoformat") else start_time,
|
start_time_field: start_time.isoformat() if hasattr(start_time, "isoformat") else start_time,
|
||||||
price_field: price,
|
price_field: converted_price,
|
||||||
}
|
}
|
||||||
|
|
||||||
# Add level if requested (only when price is not NULL)
|
if include_level and "level" in interval:
|
||||||
if include_level and "level" in interval and price is not None:
|
|
||||||
data_point[level_field] = interval["level"]
|
data_point[level_field] = interval["level"]
|
||||||
|
if include_rating_level and "rating_level" in interval:
|
||||||
# Add rating_level if requested (only when price is not NULL)
|
|
||||||
if include_rating_level and "rating_level" in interval and price is not None:
|
|
||||||
data_point[rating_level_field] = interval["rating_level"]
|
data_point[rating_level_field] = interval["rating_level"]
|
||||||
|
|
||||||
# Add average if requested
|
day_key = _get_day_key_for_interval(start_time)
|
||||||
if include_average and day in day_averages:
|
if include_average and day_key and day_key in day_averages:
|
||||||
data_point[average_field] = day_averages[day]
|
data_point[average_field] = day_averages[day_key]
|
||||||
|
|
||||||
chart_data.append(data_point)
|
chart_data.append(data_point)
|
||||||
elif insert_nulls == "segments" and (level_filter or rating_level_filter):
|
|
||||||
# Mode 'segments': Add NULL points at segment boundaries for clean gaps
|
|
||||||
# Determine which field to check based on filter type
|
|
||||||
filter_field = "rating_level" if rating_level_filter else "level"
|
|
||||||
filter_values = rating_level_filter if rating_level_filter else level_filter
|
|
||||||
|
|
||||||
for i in range(len(day_prices) - 1):
|
# AFTER the real point: Add END-BRIDGE to draw vertical line DOWN to previous price
|
||||||
interval = day_prices[i]
|
# This ensures the vertical upward transition line is drawn in THIS (more expensive) color
|
||||||
next_interval = day_prices[i + 1]
|
# but the tooltip shows the actual (higher) price
|
||||||
|
if connect_segments and is_segment_start and is_from_cheaper and prev_price is not None:
|
||||||
start_time = interval.get("startsAt")
|
converted_prev_price = round(prev_price * 100, 2) if subunit_currency else round(prev_price, 4)
|
||||||
price = interval.get("total")
|
|
||||||
next_price = next_interval.get("total")
|
|
||||||
next_start_time = next_interval.get("startsAt")
|
|
||||||
|
|
||||||
if start_time is None or price is None:
|
|
||||||
continue
|
|
||||||
|
|
||||||
interval_value = interval.get(filter_field)
|
|
||||||
next_value = next_interval.get(filter_field)
|
|
||||||
|
|
||||||
# Check if current interval matches filter
|
|
||||||
if interval_value in filter_values: # type: ignore[operator]
|
|
||||||
# Convert price
|
|
||||||
converted_price = round(price * 100, 2) if subunit_currency else round(price, 4)
|
|
||||||
if round_decimals is not None:
|
if round_decimals is not None:
|
||||||
converted_price = round(converted_price, round_decimals)
|
converted_prev_price = round(converted_prev_price, round_decimals)
|
||||||
|
|
||||||
# Add current point
|
# End-bridge: draws line DOWN to previous (cheaper) price
|
||||||
data_point = {
|
end_bridge = {
|
||||||
start_time_field: start_time.isoformat()
|
start_time_field: start_time.isoformat()
|
||||||
if hasattr(start_time, "isoformat")
|
if hasattr(start_time, "isoformat")
|
||||||
else start_time,
|
else start_time,
|
||||||
price_field: converted_price,
|
price_field: converted_prev_price, # Go DOWN to previous (cheaper) price
|
||||||
}
|
}
|
||||||
|
|
||||||
if include_level and "level" in interval:
|
if include_level and "level" in interval:
|
||||||
data_point[level_field] = interval["level"]
|
end_bridge[level_field] = interval["level"] # Keep THIS level for color
|
||||||
if include_rating_level and "rating_level" in interval:
|
if include_rating_level and "rating_level" in interval:
|
||||||
data_point[rating_level_field] = interval["rating_level"]
|
end_bridge[rating_level_field] = interval["rating_level"]
|
||||||
if include_average and day in day_averages:
|
if include_average and day_key and day_key in day_averages:
|
||||||
data_point[average_field] = day_averages[day]
|
end_bridge[average_field] = day_averages[day_key]
|
||||||
|
chart_data.append(end_bridge)
|
||||||
|
|
||||||
chart_data.append(data_point)
|
# NULL to stop this "bridge sequence" - prevents line from going to next point
|
||||||
|
null_point = {start_time_field: data_point[start_time_field], price_field: None}
|
||||||
|
chart_data.append(null_point)
|
||||||
|
|
||||||
# Check if next interval is different level (segment boundary)
|
chart_data.append(data_point)
|
||||||
if next_value != interval_value:
|
|
||||||
next_start_serialized = (
|
|
||||||
next_start_time.isoformat()
|
|
||||||
if next_start_time and hasattr(next_start_time, "isoformat")
|
|
||||||
else next_start_time
|
|
||||||
)
|
|
||||||
|
|
||||||
if connect_segments and next_price is not None:
|
# Check if next interval is different level (segment boundary = END of this segment)
|
||||||
# Connect segments visually by adding bridge point + NULL
|
if next_value != interval_value:
|
||||||
# Bridge point: extends current series to boundary with next price
|
next_start_serialized = (
|
||||||
# NULL point: stops series so it doesn't continue into next segment
|
next_start_time.isoformat()
|
||||||
|
if next_start_time and hasattr(next_start_time, "isoformat")
|
||||||
|
else next_start_time
|
||||||
|
)
|
||||||
|
|
||||||
|
is_to_more_expensive = _is_transition_to_more_expensive(
|
||||||
|
interval_value, next_value, use_rating=use_rating
|
||||||
|
)
|
||||||
|
|
||||||
|
if connect_segments and next_price is not None:
|
||||||
|
# Connect segments visually at boundaries
|
||||||
|
# Strategy: The vertical line should be drawn by the MORE EXPENSIVE segment
|
||||||
|
#
|
||||||
|
# - Price INCREASE (cheap → expensive): Vertical line belongs to NEXT segment
|
||||||
|
# → THIS segment just holds at current price, NEXT segment draws the bridge UP
|
||||||
|
# → We add a hold point here, the start-bridge logic handles the NEXT segment
|
||||||
|
#
|
||||||
|
# - Price DECREASE (expensive → cheap): Vertical line belongs to THIS segment
|
||||||
|
# → THIS segment draws the bridge DOWN to next price
|
||||||
|
|
||||||
|
if is_to_more_expensive:
|
||||||
|
# Transition to MORE EXPENSIVE level (price increase)
|
||||||
|
# Just hold at current price - the NEXT segment will draw the upward line
|
||||||
|
# via its start-bridge logic
|
||||||
|
hold_point = {
|
||||||
|
start_time_field: next_start_serialized,
|
||||||
|
price_field: converted_price, # Hold at CURRENT price
|
||||||
|
}
|
||||||
|
if include_level and "level" in interval:
|
||||||
|
hold_point[level_field] = interval["level"]
|
||||||
|
if include_rating_level and "rating_level" in interval:
|
||||||
|
hold_point[rating_level_field] = interval["rating_level"]
|
||||||
|
if include_average and day_key and day_key in day_averages:
|
||||||
|
hold_point[average_field] = day_averages[day_key]
|
||||||
|
chart_data.append(hold_point)
|
||||||
|
else:
|
||||||
|
# Transition to LESS EXPENSIVE or SAME level (price decrease/stable)
|
||||||
|
# Draw the bridge DOWN to the next price in THIS level's color
|
||||||
converted_next_price = (
|
converted_next_price = (
|
||||||
round(next_price * 100, 2) if subunit_currency else round(next_price, 4)
|
round(next_price * 100, 2) if subunit_currency else round(next_price, 4)
|
||||||
)
|
)
|
||||||
if round_decimals is not None:
|
if round_decimals is not None:
|
||||||
converted_next_price = round(converted_next_price, round_decimals)
|
converted_next_price = round(converted_next_price, round_decimals)
|
||||||
|
|
||||||
# 1. Bridge point: boundary with next price, still current level
|
|
||||||
# This makes the line go up/down to meet the next series
|
|
||||||
bridge_point = {
|
bridge_point = {
|
||||||
start_time_field: next_start_serialized,
|
start_time_field: next_start_serialized,
|
||||||
price_field: converted_next_price,
|
price_field: converted_next_price,
|
||||||
|
|
@ -612,173 +753,136 @@ async def handle_chartdata(call: ServiceCall) -> dict[str, Any]: # noqa: PLR091
|
||||||
bridge_point[level_field] = interval["level"]
|
bridge_point[level_field] = interval["level"]
|
||||||
if include_rating_level and "rating_level" in interval:
|
if include_rating_level and "rating_level" in interval:
|
||||||
bridge_point[rating_level_field] = interval["rating_level"]
|
bridge_point[rating_level_field] = interval["rating_level"]
|
||||||
if include_average and day in day_averages:
|
if include_average and day_key and day_key in day_averages:
|
||||||
bridge_point[average_field] = day_averages[day]
|
bridge_point[average_field] = day_averages[day_key]
|
||||||
chart_data.append(bridge_point)
|
chart_data.append(bridge_point)
|
||||||
|
|
||||||
# 2. NULL point: stops the current series
|
# NULL point: stops the current series
|
||||||
# Without this, ApexCharts continues drawing within the series
|
null_point = {start_time_field: next_start_serialized, price_field: None}
|
||||||
null_point = {start_time_field: next_start_serialized, price_field: None}
|
chart_data.append(null_point)
|
||||||
chart_data.append(null_point)
|
else:
|
||||||
else:
|
# Original behavior: Hold current price until next timestamp
|
||||||
# Original behavior: Hold current price until next timestamp
|
hold_point = {
|
||||||
hold_point = {
|
start_time_field: next_start_serialized,
|
||||||
start_time_field: next_start_serialized,
|
price_field: converted_price,
|
||||||
price_field: converted_price,
|
}
|
||||||
}
|
if include_level and "level" in interval:
|
||||||
if include_level and "level" in interval:
|
hold_point[level_field] = interval["level"]
|
||||||
hold_point[level_field] = interval["level"]
|
if include_rating_level and "rating_level" in interval:
|
||||||
if include_rating_level and "rating_level" in interval:
|
hold_point[rating_level_field] = interval["rating_level"]
|
||||||
hold_point[rating_level_field] = interval["rating_level"]
|
if include_average and day_key and day_key in day_averages:
|
||||||
if include_average and day in day_averages:
|
hold_point[average_field] = day_averages[day_key]
|
||||||
hold_point[average_field] = day_averages[day]
|
chart_data.append(hold_point)
|
||||||
chart_data.append(hold_point)
|
|
||||||
|
|
||||||
# Add NULL point to create gap
|
# Add NULL point to create gap
|
||||||
null_point = {start_time_field: next_start_serialized, price_field: None}
|
null_point = {start_time_field: next_start_serialized, price_field: None}
|
||||||
chart_data.append(null_point)
|
chart_data.append(null_point)
|
||||||
|
|
||||||
# Handle last interval of the day - extend to midnight
|
# Handle LAST interval of the entire selection (not per-day)
|
||||||
if day_prices:
|
# The main loop processes up to n-1, so we need to add the last interval
|
||||||
last_interval = day_prices[-1]
|
if all_prices:
|
||||||
last_start_time = last_interval.get("startsAt")
|
last_interval = all_prices[-1]
|
||||||
last_price = last_interval.get("total")
|
last_start_time = last_interval.get("startsAt")
|
||||||
last_value = last_interval.get(filter_field)
|
last_price = last_interval.get("total")
|
||||||
|
last_value = last_interval.get(filter_field)
|
||||||
|
|
||||||
if last_start_time and last_price is not None and last_value in filter_values: # type: ignore[operator]
|
if last_start_time and last_price is not None and last_value in filter_values: # type: ignore[operator]
|
||||||
# Timestamp is already datetime in local timezone
|
# Add the last interval as a data point
|
||||||
last_dt = last_start_time # Already datetime object
|
converted_last_price = round(last_price * 100, 2) if subunit_currency else round(last_price, 4)
|
||||||
if last_dt:
|
if round_decimals is not None:
|
||||||
# Calculate next day at 00:00
|
converted_last_price = round(converted_last_price, round_decimals)
|
||||||
next_day = last_dt.replace(hour=0, minute=0, second=0, microsecond=0)
|
|
||||||
next_day = next_day + timedelta(days=1)
|
|
||||||
midnight_timestamp = next_day.isoformat()
|
|
||||||
|
|
||||||
# Try to get real price from tomorrow's first interval
|
last_data_point = {
|
||||||
next_day_name = None
|
start_time_field: last_start_time.isoformat()
|
||||||
if day == "yesterday":
|
if hasattr(last_start_time, "isoformat")
|
||||||
next_day_name = "today"
|
else last_start_time,
|
||||||
elif day == "today":
|
price_field: converted_last_price,
|
||||||
next_day_name = "tomorrow"
|
}
|
||||||
# For "tomorrow", we don't have a "day after tomorrow"
|
if include_level and "level" in last_interval:
|
||||||
|
last_data_point[level_field] = last_interval["level"]
|
||||||
|
if include_rating_level and "rating_level" in last_interval:
|
||||||
|
last_data_point[rating_level_field] = last_interval["rating_level"]
|
||||||
|
|
||||||
midnight_price = None
|
day_key = _get_day_key_for_interval(last_start_time)
|
||||||
midnight_interval = None
|
if include_average and day_key and day_key in day_averages:
|
||||||
|
last_data_point[average_field] = day_averages[day_key]
|
||||||
|
chart_data.append(last_data_point)
|
||||||
|
|
||||||
if next_day_name:
|
# Extend to end of selected time range (midnight after last day)
|
||||||
# Use helper to get first interval of next day
|
last_dt = last_start_time
|
||||||
# Map day key to offset: yesterday=-1, today=0, tomorrow=1
|
if last_dt:
|
||||||
next_day_offset = {"yesterday": -1, "today": 0, "tomorrow": 1}[next_day_name]
|
# Calculate midnight after the last interval
|
||||||
next_day_intervals = get_intervals_for_day_offsets(coordinator.data, [next_day_offset])
|
next_midnight = last_dt.replace(hour=0, minute=0, second=0, microsecond=0)
|
||||||
if next_day_intervals:
|
next_midnight = next_midnight + timedelta(days=1)
|
||||||
first_next = next_day_intervals[0]
|
midnight_timestamp = next_midnight.isoformat()
|
||||||
first_next_value = first_next.get(filter_field)
|
|
||||||
# Only use tomorrow's price if it matches the same filter
|
|
||||||
if first_next_value == last_value:
|
|
||||||
midnight_price = first_next.get("total")
|
|
||||||
midnight_interval = first_next
|
|
||||||
|
|
||||||
# Fallback: use last interval's price if no tomorrow data or different level
|
# Add hold point at midnight
|
||||||
if midnight_price is None:
|
end_point = {start_time_field: midnight_timestamp, price_field: converted_last_price}
|
||||||
midnight_price = last_price
|
if include_level and "level" in last_interval:
|
||||||
midnight_interval = last_interval
|
end_point[level_field] = last_interval["level"]
|
||||||
|
if include_rating_level and "rating_level" in last_interval:
|
||||||
|
end_point[rating_level_field] = last_interval["rating_level"]
|
||||||
|
if include_average and day_key and day_key in day_averages:
|
||||||
|
end_point[average_field] = day_averages[day_key]
|
||||||
|
chart_data.append(end_point)
|
||||||
|
|
||||||
# Convert price
|
# Add NULL to end series
|
||||||
converted_price = (
|
null_point = {start_time_field: midnight_timestamp, price_field: None}
|
||||||
round(midnight_price * 100, 2) if subunit_currency else round(midnight_price, 4)
|
chart_data.append(null_point)
|
||||||
)
|
|
||||||
if round_decimals is not None:
|
|
||||||
converted_price = round(converted_price, round_decimals)
|
|
||||||
|
|
||||||
# Add point at midnight with appropriate price (extends graph to end of day)
|
else:
|
||||||
end_point = {start_time_field: midnight_timestamp, price_field: converted_price}
|
# Mode 'none' (default): Only return matching intervals, no NULL insertion
|
||||||
if midnight_interval is not None:
|
for interval in all_prices:
|
||||||
if include_level and "level" in midnight_interval:
|
start_time = interval.get("startsAt")
|
||||||
end_point[level_field] = midnight_interval["level"]
|
price = interval.get("total")
|
||||||
if include_rating_level and "rating_level" in midnight_interval:
|
|
||||||
end_point[rating_level_field] = midnight_interval["rating_level"]
|
|
||||||
if include_average and day in day_averages:
|
|
||||||
end_point[average_field] = day_averages[day]
|
|
||||||
chart_data.append(end_point)
|
|
||||||
else:
|
|
||||||
# Mode 'none' (default): Only return matching intervals, no NULL insertion
|
|
||||||
for interval in day_prices:
|
|
||||||
start_time = interval.get("startsAt")
|
|
||||||
price = interval.get("total")
|
|
||||||
|
|
||||||
if start_time is not None and price is not None:
|
if start_time is not None and price is not None:
|
||||||
# Apply period filter if specified
|
# Apply period filter if specified
|
||||||
if (
|
if (
|
||||||
period_filter is not None
|
period_filter is not None
|
||||||
and period_timestamps is not None
|
and period_timestamps is not None
|
||||||
and start_time not in period_timestamps
|
and start_time not in period_timestamps
|
||||||
):
|
):
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Apply level filter if specified
|
# Apply level filter if specified
|
||||||
if level_filter is not None and "level" in interval and interval["level"] not in level_filter:
|
if level_filter is not None and "level" in interval and interval["level"] not in level_filter:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Apply rating_level filter if specified
|
# Apply rating_level filter if specified
|
||||||
if (
|
if (
|
||||||
rating_level_filter is not None
|
rating_level_filter is not None
|
||||||
and "rating_level" in interval
|
and "rating_level" in interval
|
||||||
and interval["rating_level"] not in rating_level_filter
|
and interval["rating_level"] not in rating_level_filter
|
||||||
):
|
):
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Convert to subunit currency (cents/øre) if requested
|
# Convert to subunit currency (cents/øre) if requested
|
||||||
price = round(price * 100, 2) if subunit_currency else round(price, 4)
|
price = round(price * 100, 2) if subunit_currency else round(price, 4)
|
||||||
|
|
||||||
# Apply custom rounding if specified
|
# Apply custom rounding if specified
|
||||||
if round_decimals is not None:
|
if round_decimals is not None:
|
||||||
price = round(price, round_decimals)
|
price = round(price, round_decimals)
|
||||||
|
|
||||||
data_point = {
|
data_point = {
|
||||||
start_time_field: start_time.isoformat()
|
start_time_field: start_time.isoformat() if hasattr(start_time, "isoformat") else start_time,
|
||||||
if hasattr(start_time, "isoformat")
|
price_field: price,
|
||||||
else start_time,
|
}
|
||||||
price_field: price,
|
|
||||||
}
|
|
||||||
|
|
||||||
# Add level if requested
|
# Add level if requested
|
||||||
if include_level and "level" in interval:
|
if include_level and "level" in interval:
|
||||||
data_point[level_field] = interval["level"]
|
data_point[level_field] = interval["level"]
|
||||||
|
|
||||||
# Add rating_level if requested
|
# Add rating_level if requested
|
||||||
if include_rating_level and "rating_level" in interval:
|
if include_rating_level and "rating_level" in interval:
|
||||||
data_point[rating_level_field] = interval["rating_level"]
|
data_point[rating_level_field] = interval["rating_level"]
|
||||||
|
|
||||||
# Add average if requested
|
# Add average if requested
|
||||||
if include_average and day in day_averages:
|
day_key = _get_day_key_for_interval(start_time)
|
||||||
data_point[average_field] = day_averages[day]
|
if include_average and day_key and day_key in day_averages:
|
||||||
|
data_point[average_field] = day_averages[day_key]
|
||||||
|
|
||||||
chart_data.append(data_point)
|
chart_data.append(data_point)
|
||||||
|
|
||||||
elif resolution == "hourly":
|
|
||||||
# Hourly averages (4 intervals per hour: :00, :15, :30, :45)
|
|
||||||
chart_data.extend(
|
|
||||||
aggregate_hourly_exact(
|
|
||||||
day_prices,
|
|
||||||
start_time_field,
|
|
||||||
price_field,
|
|
||||||
coordinator=coordinator,
|
|
||||||
use_subunit_currency=subunit_currency,
|
|
||||||
round_decimals=round_decimals,
|
|
||||||
include_level=include_level,
|
|
||||||
include_rating_level=include_rating_level,
|
|
||||||
level_filter=level_filter,
|
|
||||||
rating_level_filter=rating_level_filter,
|
|
||||||
include_average=include_average,
|
|
||||||
level_field=level_field,
|
|
||||||
rating_level_field=rating_level_field,
|
|
||||||
average_field=average_field,
|
|
||||||
day_average=day_averages.get(day),
|
|
||||||
threshold_low=threshold_low,
|
|
||||||
period_timestamps=period_timestamps,
|
|
||||||
threshold_high=threshold_high,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Remove trailing null values ONLY for insert_nulls='segments' mode.
|
# Remove trailing null values ONLY for insert_nulls='segments' mode.
|
||||||
# For 'all' mode, trailing nulls are intentional (show no-match until end of day).
|
# For 'all' mode, trailing nulls are intentional (show no-match until end of day).
|
||||||
|
|
|
||||||
|
|
@ -145,12 +145,14 @@ async def handle_get_price(call: ServiceCall) -> ServiceResponse:
|
||||||
|
|
||||||
# Call the interval pool to get intervals (with intelligent caching)
|
# Call the interval pool to get intervals (with intelligent caching)
|
||||||
# Single-home architecture: pool knows its home_id, no parameter needed
|
# Single-home architecture: pool knows its home_id, no parameter needed
|
||||||
price_info = await pool.get_intervals(
|
price_info, _api_called = await pool.get_intervals(
|
||||||
api_client=api_client,
|
api_client=api_client,
|
||||||
user_data=user_data,
|
user_data=user_data,
|
||||||
start_time=start_time,
|
start_time=start_time,
|
||||||
end_time=end_time,
|
end_time=end_time,
|
||||||
)
|
)
|
||||||
|
# Note: We ignore api_called flag here - service always returns requested data
|
||||||
|
# regardless of whether it came from cache or was fetched fresh from API
|
||||||
|
|
||||||
except Exception as error:
|
except Exception as error:
|
||||||
_LOGGER.exception("Error fetching price data")
|
_LOGGER.exception("Error fetching price data")
|
||||||
|
|
|
||||||
38
custom_components/tibber_prices/switch/__init__.py
Normal file
38
custom_components/tibber_prices/switch/__init__.py
Normal file
|
|
@ -0,0 +1,38 @@
|
||||||
|
"""
|
||||||
|
Switch platform for Tibber Prices integration.
|
||||||
|
|
||||||
|
Provides configurable switch entities for runtime overrides of Best Price
|
||||||
|
and Peak Price period calculation boolean settings (enable_min_periods).
|
||||||
|
|
||||||
|
When enabled, these entities take precedence over the options flow settings.
|
||||||
|
When disabled (default), the options flow settings are used.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from typing import TYPE_CHECKING
|
||||||
|
|
||||||
|
from .core import TibberPricesConfigSwitch
|
||||||
|
from .definitions import SWITCH_ENTITY_DESCRIPTIONS
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from custom_components.tibber_prices.data import TibberPricesConfigEntry
|
||||||
|
from homeassistant.core import HomeAssistant
|
||||||
|
from homeassistant.helpers.entity_platform import AddEntitiesCallback
|
||||||
|
|
||||||
|
|
||||||
|
async def async_setup_entry(
|
||||||
|
_hass: HomeAssistant,
|
||||||
|
entry: TibberPricesConfigEntry,
|
||||||
|
async_add_entities: AddEntitiesCallback,
|
||||||
|
) -> None:
|
||||||
|
"""Set up Tibber Prices switch entities based on a config entry."""
|
||||||
|
coordinator = entry.runtime_data.coordinator
|
||||||
|
|
||||||
|
async_add_entities(
|
||||||
|
TibberPricesConfigSwitch(
|
||||||
|
coordinator=coordinator,
|
||||||
|
entity_description=entity_description,
|
||||||
|
)
|
||||||
|
for entity_description in SWITCH_ENTITY_DESCRIPTIONS
|
||||||
|
)
|
||||||
245
custom_components/tibber_prices/switch/core.py
Normal file
245
custom_components/tibber_prices/switch/core.py
Normal file
|
|
@ -0,0 +1,245 @@
|
||||||
|
"""
|
||||||
|
Switch entity implementation for Tibber Prices configuration overrides.
|
||||||
|
|
||||||
|
These entities allow runtime configuration of boolean period calculation settings.
|
||||||
|
When a config entity is enabled, its value takes precedence over the
|
||||||
|
options flow setting for period calculations.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from typing import TYPE_CHECKING, Any
|
||||||
|
|
||||||
|
from custom_components.tibber_prices.const import (
|
||||||
|
DOMAIN,
|
||||||
|
get_home_type_translation,
|
||||||
|
get_translation,
|
||||||
|
)
|
||||||
|
from homeassistant.components.switch import SwitchEntity
|
||||||
|
from homeassistant.core import callback
|
||||||
|
from homeassistant.helpers.device_registry import DeviceEntryType, DeviceInfo
|
||||||
|
from homeassistant.helpers.restore_state import RestoreEntity
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from custom_components.tibber_prices.coordinator import (
|
||||||
|
TibberPricesDataUpdateCoordinator,
|
||||||
|
)
|
||||||
|
|
||||||
|
from .definitions import TibberPricesSwitchEntityDescription
|
||||||
|
|
||||||
|
_LOGGER = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class TibberPricesConfigSwitch(RestoreEntity, SwitchEntity):
|
||||||
|
"""
|
||||||
|
A switch entity for configuring boolean period calculation settings at runtime.
|
||||||
|
|
||||||
|
When this entity is enabled, its value overrides the corresponding
|
||||||
|
options flow setting. When disabled (default), the options flow
|
||||||
|
setting is used for period calculations.
|
||||||
|
|
||||||
|
The entity restores its value after Home Assistant restart.
|
||||||
|
"""
|
||||||
|
|
||||||
|
_attr_has_entity_name = True
|
||||||
|
entity_description: TibberPricesSwitchEntityDescription
|
||||||
|
|
||||||
|
# Exclude all attributes from recorder history - config entities don't need history
|
||||||
|
_unrecorded_attributes = frozenset(
|
||||||
|
{
|
||||||
|
"description",
|
||||||
|
"long_description",
|
||||||
|
"usage_tips",
|
||||||
|
"friendly_name",
|
||||||
|
"icon",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
coordinator: TibberPricesDataUpdateCoordinator,
|
||||||
|
entity_description: TibberPricesSwitchEntityDescription,
|
||||||
|
) -> None:
|
||||||
|
"""Initialize the config switch entity."""
|
||||||
|
self.coordinator = coordinator
|
||||||
|
self.entity_description = entity_description
|
||||||
|
|
||||||
|
# Set unique ID
|
||||||
|
self._attr_unique_id = (
|
||||||
|
f"{coordinator.config_entry.unique_id or coordinator.config_entry.entry_id}_{entity_description.key}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Initialize with None - will be set in async_added_to_hass
|
||||||
|
self._attr_is_on: bool | None = None
|
||||||
|
|
||||||
|
# Setup device info
|
||||||
|
self._setup_device_info()
|
||||||
|
|
||||||
|
def _setup_device_info(self) -> None:
|
||||||
|
"""Set up device information."""
|
||||||
|
home_name, home_id, home_type = self._get_device_info()
|
||||||
|
language = self.coordinator.hass.config.language or "en"
|
||||||
|
translated_model = get_home_type_translation(home_type, language) if home_type else "Unknown"
|
||||||
|
|
||||||
|
self._attr_device_info = DeviceInfo(
|
||||||
|
entry_type=DeviceEntryType.SERVICE,
|
||||||
|
identifiers={
|
||||||
|
(
|
||||||
|
DOMAIN,
|
||||||
|
self.coordinator.config_entry.unique_id or self.coordinator.config_entry.entry_id,
|
||||||
|
)
|
||||||
|
},
|
||||||
|
name=home_name,
|
||||||
|
manufacturer="Tibber",
|
||||||
|
model=translated_model,
|
||||||
|
serial_number=home_id if home_id else None,
|
||||||
|
configuration_url="https://developer.tibber.com/explorer",
|
||||||
|
)
|
||||||
|
|
||||||
|
def _get_device_info(self) -> tuple[str, str | None, str | None]:
|
||||||
|
"""Get device name, ID and type."""
|
||||||
|
user_profile = self.coordinator.get_user_profile()
|
||||||
|
is_subentry = bool(self.coordinator.config_entry.data.get("home_id"))
|
||||||
|
home_id = self.coordinator.config_entry.unique_id
|
||||||
|
home_type = None
|
||||||
|
|
||||||
|
if is_subentry:
|
||||||
|
home_data = self.coordinator.config_entry.data.get("home_data", {})
|
||||||
|
home_id = self.coordinator.config_entry.data.get("home_id")
|
||||||
|
address = home_data.get("address", {})
|
||||||
|
address1 = address.get("address1", "")
|
||||||
|
city = address.get("city", "")
|
||||||
|
app_nickname = home_data.get("appNickname", "")
|
||||||
|
home_type = home_data.get("type", "")
|
||||||
|
|
||||||
|
if app_nickname and app_nickname.strip():
|
||||||
|
home_name = app_nickname.strip()
|
||||||
|
elif address1:
|
||||||
|
home_name = address1
|
||||||
|
if city:
|
||||||
|
home_name = f"{home_name}, {city}"
|
||||||
|
else:
|
||||||
|
home_name = f"Tibber Home {home_id[:8]}" if home_id else "Tibber Home"
|
||||||
|
elif user_profile:
|
||||||
|
home_name = user_profile.get("name") or "Tibber Home"
|
||||||
|
else:
|
||||||
|
home_name = "Tibber Home"
|
||||||
|
|
||||||
|
return home_name, home_id, home_type
|
||||||
|
|
||||||
|
async def async_added_to_hass(self) -> None:
|
||||||
|
"""Handle entity which was added to Home Assistant."""
|
||||||
|
await super().async_added_to_hass()
|
||||||
|
|
||||||
|
# Try to restore previous state
|
||||||
|
last_state = await self.async_get_last_state()
|
||||||
|
if last_state is not None and last_state.state in ("on", "off"):
|
||||||
|
self._attr_is_on = last_state.state == "on"
|
||||||
|
_LOGGER.debug(
|
||||||
|
"Restored %s value: %s",
|
||||||
|
self.entity_description.key,
|
||||||
|
self._attr_is_on,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
# Initialize with value from options flow (or default)
|
||||||
|
self._attr_is_on = self._get_value_from_options()
|
||||||
|
_LOGGER.debug(
|
||||||
|
"Initialized %s from options: %s",
|
||||||
|
self.entity_description.key,
|
||||||
|
self._attr_is_on,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Register override with coordinator if entity is enabled
|
||||||
|
await self._sync_override_state()
|
||||||
|
|
||||||
|
async def async_will_remove_from_hass(self) -> None:
|
||||||
|
"""Handle entity removal from Home Assistant."""
|
||||||
|
# Remove override when entity is removed
|
||||||
|
self.coordinator.remove_config_override(
|
||||||
|
self.entity_description.config_key,
|
||||||
|
self.entity_description.config_section,
|
||||||
|
)
|
||||||
|
await super().async_will_remove_from_hass()
|
||||||
|
|
||||||
|
def _get_value_from_options(self) -> bool:
|
||||||
|
"""Get the current value from options flow or default."""
|
||||||
|
options = self.coordinator.config_entry.options
|
||||||
|
section = options.get(self.entity_description.config_section, {})
|
||||||
|
value = section.get(
|
||||||
|
self.entity_description.config_key,
|
||||||
|
self.entity_description.default_value,
|
||||||
|
)
|
||||||
|
return bool(value)
|
||||||
|
|
||||||
|
async def _sync_override_state(self) -> None:
|
||||||
|
"""Sync the override state with the coordinator based on entity enabled state."""
|
||||||
|
# Check if entity is enabled in registry
|
||||||
|
if self.registry_entry is not None and not self.registry_entry.disabled:
|
||||||
|
# Entity is enabled - register the override
|
||||||
|
if self._attr_is_on is not None:
|
||||||
|
self.coordinator.set_config_override(
|
||||||
|
self.entity_description.config_key,
|
||||||
|
self.entity_description.config_section,
|
||||||
|
self._attr_is_on,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
# Entity is disabled - remove override
|
||||||
|
self.coordinator.remove_config_override(
|
||||||
|
self.entity_description.config_key,
|
||||||
|
self.entity_description.config_section,
|
||||||
|
)
|
||||||
|
|
||||||
|
async def async_turn_on(self, **_kwargs: Any) -> None:
|
||||||
|
"""Turn the switch on."""
|
||||||
|
await self._set_value(is_on=True)
|
||||||
|
|
||||||
|
async def async_turn_off(self, **_kwargs: Any) -> None:
|
||||||
|
"""Turn the switch off."""
|
||||||
|
await self._set_value(is_on=False)
|
||||||
|
|
||||||
|
async def _set_value(self, *, is_on: bool) -> None:
|
||||||
|
"""Update the current value and trigger recalculation."""
|
||||||
|
self._attr_is_on = is_on
|
||||||
|
|
||||||
|
# Update the coordinator's runtime override
|
||||||
|
self.coordinator.set_config_override(
|
||||||
|
self.entity_description.config_key,
|
||||||
|
self.entity_description.config_section,
|
||||||
|
is_on,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Trigger period recalculation (same path as options update)
|
||||||
|
await self.coordinator.async_handle_config_override_update()
|
||||||
|
|
||||||
|
_LOGGER.debug(
|
||||||
|
"Updated %s to %s, triggered period recalculation",
|
||||||
|
self.entity_description.key,
|
||||||
|
is_on,
|
||||||
|
)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def extra_state_attributes(self) -> dict[str, Any] | None:
|
||||||
|
"""Return entity state attributes with description."""
|
||||||
|
language = self.coordinator.hass.config.language or "en"
|
||||||
|
|
||||||
|
# Try to get description from custom translations
|
||||||
|
# Custom translations use direct path: switch.{key}.description
|
||||||
|
translation_path = [
|
||||||
|
"switch",
|
||||||
|
self.entity_description.translation_key or self.entity_description.key,
|
||||||
|
"description",
|
||||||
|
]
|
||||||
|
description = get_translation(translation_path, language)
|
||||||
|
|
||||||
|
attrs: dict[str, Any] = {}
|
||||||
|
if description:
|
||||||
|
attrs["description"] = description
|
||||||
|
|
||||||
|
return attrs if attrs else None
|
||||||
|
|
||||||
|
@callback
|
||||||
|
def async_registry_entry_updated(self) -> None:
|
||||||
|
"""Handle entity registry update (enabled/disabled state change)."""
|
||||||
|
# This is called when the entity is enabled/disabled in the UI
|
||||||
|
self.hass.async_create_task(self._sync_override_state())
|
||||||
84
custom_components/tibber_prices/switch/definitions.py
Normal file
84
custom_components/tibber_prices/switch/definitions.py
Normal file
|
|
@ -0,0 +1,84 @@
|
||||||
|
"""
|
||||||
|
Switch entity definitions for Tibber Prices configuration overrides.
|
||||||
|
|
||||||
|
These switch entities allow runtime configuration of boolean settings
|
||||||
|
for Best Price and Peak Price period calculations.
|
||||||
|
|
||||||
|
When enabled, the entity value takes precedence over the options flow setting.
|
||||||
|
When disabled (default), the options flow setting is used.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from dataclasses import dataclass
|
||||||
|
|
||||||
|
from homeassistant.components.switch import SwitchEntityDescription
|
||||||
|
from homeassistant.const import EntityCategory
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(frozen=True, kw_only=True)
|
||||||
|
class TibberPricesSwitchEntityDescription(SwitchEntityDescription):
|
||||||
|
"""Describes a Tibber Prices switch entity for config overrides."""
|
||||||
|
|
||||||
|
# The config key this entity overrides (matches CONF_* constants)
|
||||||
|
config_key: str
|
||||||
|
# The section in options where this setting is stored
|
||||||
|
config_section: str
|
||||||
|
# Whether this is for best_price (False) or peak_price (True)
|
||||||
|
is_peak_price: bool = False
|
||||||
|
# Default value from const.py
|
||||||
|
default_value: bool = True
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# BEST PRICE PERIOD CONFIGURATION OVERRIDES (Boolean)
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
BEST_PRICE_SWITCH_ENTITIES = (
|
||||||
|
SwitchEntityDescription(
|
||||||
|
key="best_price_enable_relaxation_override",
|
||||||
|
translation_key="best_price_enable_relaxation_override",
|
||||||
|
name="Best Price: Achieve Minimum Count",
|
||||||
|
icon="mdi:arrow-down-bold-circle",
|
||||||
|
entity_category=EntityCategory.CONFIG,
|
||||||
|
entity_registry_enabled_default=False,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
# Custom descriptions with extra fields
|
||||||
|
BEST_PRICE_SWITCH_ENTITY_DESCRIPTIONS = (
|
||||||
|
TibberPricesSwitchEntityDescription(
|
||||||
|
key="best_price_enable_relaxation_override",
|
||||||
|
translation_key="best_price_enable_relaxation_override",
|
||||||
|
name="Best Price: Achieve Minimum Count",
|
||||||
|
icon="mdi:arrow-down-bold-circle",
|
||||||
|
entity_category=EntityCategory.CONFIG,
|
||||||
|
entity_registry_enabled_default=False,
|
||||||
|
config_key="enable_min_periods_best",
|
||||||
|
config_section="relaxation_and_target_periods",
|
||||||
|
is_peak_price=False,
|
||||||
|
default_value=True, # DEFAULT_ENABLE_MIN_PERIODS_BEST
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# PEAK PRICE PERIOD CONFIGURATION OVERRIDES (Boolean)
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
PEAK_PRICE_SWITCH_ENTITY_DESCRIPTIONS = (
|
||||||
|
TibberPricesSwitchEntityDescription(
|
||||||
|
key="peak_price_enable_relaxation_override",
|
||||||
|
translation_key="peak_price_enable_relaxation_override",
|
||||||
|
name="Peak Price: Achieve Minimum Count",
|
||||||
|
icon="mdi:arrow-up-bold-circle",
|
||||||
|
entity_category=EntityCategory.CONFIG,
|
||||||
|
entity_registry_enabled_default=False,
|
||||||
|
config_key="enable_min_periods_peak",
|
||||||
|
config_section="relaxation_and_target_periods",
|
||||||
|
is_peak_price=True,
|
||||||
|
default_value=True, # DEFAULT_ENABLE_MIN_PERIODS_PEAK
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
# All switch entity descriptions combined
|
||||||
|
SWITCH_ENTITY_DESCRIPTIONS = BEST_PRICE_SWITCH_ENTITY_DESCRIPTIONS + PEAK_PRICE_SWITCH_ENTITY_DESCRIPTIONS
|
||||||
|
|
@ -11,14 +11,14 @@
|
||||||
},
|
},
|
||||||
"new_token": {
|
"new_token": {
|
||||||
"title": "API-Token eingeben",
|
"title": "API-Token eingeben",
|
||||||
"description": "Richte Tibber Preisinformationen & Bewertungen ein.\n\nUm einen API-Zugriffstoken zu generieren, besuche https://developer.tibber.com.",
|
"description": "Richte Tibber Preisinformationen & Bewertungen ein.\n\nUm einen API-Zugriffstoken zu generieren, besuche [{tibber_url}]({tibber_url}).",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API-Zugriffstoken"
|
"access_token": "API-Zugriffstoken"
|
||||||
},
|
},
|
||||||
"submit": "Token validieren"
|
"submit": "Token validieren"
|
||||||
},
|
},
|
||||||
"user": {
|
"user": {
|
||||||
"description": "Richte Tibber Preisinformationen & Bewertungen ein.\n\nUm einen API-Zugriffstoken zu generieren, besuche https://developer.tibber.com.",
|
"description": "Richte Tibber Preisinformationen & Bewertungen ein.\n\nUm einen API-Zugriffstoken zu generieren, besuche [{tibber_url}]({tibber_url}).",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API-Zugriffstoken"
|
"access_token": "API-Zugriffstoken"
|
||||||
},
|
},
|
||||||
|
|
@ -42,7 +42,7 @@
|
||||||
},
|
},
|
||||||
"reauth_confirm": {
|
"reauth_confirm": {
|
||||||
"title": "Tibber Preis-Integration erneut authentifizieren",
|
"title": "Tibber Preis-Integration erneut authentifizieren",
|
||||||
"description": "Der Zugriffstoken für Tibber ist nicht mehr gültig. Bitte gib einen neuen API-Zugriffstoken ein, um diese Integration weiter zu nutzen.\n\nUm einen neuen API-Zugriffstoken zu generieren, besuche https://developer.tibber.com.",
|
"description": "Der Zugriffstoken für Tibber ist nicht mehr gültig. Bitte gib einen neuen API-Zugriffstoken ein, um diese Integration weiter zu nutzen.\n\nUm einen neuen API-Zugriffstoken zu generieren, besuche [{tibber_url}]({tibber_url}).",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API-Zugriffstoken"
|
"access_token": "API-Zugriffstoken"
|
||||||
},
|
},
|
||||||
|
|
@ -77,7 +77,23 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"common": {
|
"common": {
|
||||||
"step_progress": "{step_num} / {total_steps}"
|
"step_progress": "{step_num} / {total_steps}",
|
||||||
|
"override_warning_template": "⚠️ {fields} wird durch Konfigurations-Entität gesteuert",
|
||||||
|
"override_warning_and": "und",
|
||||||
|
"override_field_label_best_price_min_period_length": "Mindestperiodenlänge",
|
||||||
|
"override_field_label_best_price_max_level_gap_count": "Lückentoleranz",
|
||||||
|
"override_field_label_best_price_flex": "Flexibilität",
|
||||||
|
"override_field_label_best_price_min_distance_from_avg": "Mindestabstand",
|
||||||
|
"override_field_label_enable_min_periods_best": "Mindestzahl erreichen",
|
||||||
|
"override_field_label_min_periods_best": "Mindestperioden",
|
||||||
|
"override_field_label_relaxation_attempts_best": "Lockerungsversuche",
|
||||||
|
"override_field_label_peak_price_min_period_length": "Mindestperiodenlänge",
|
||||||
|
"override_field_label_peak_price_max_level_gap_count": "Lückentoleranz",
|
||||||
|
"override_field_label_peak_price_flex": "Flexibilität",
|
||||||
|
"override_field_label_peak_price_min_distance_from_avg": "Mindestabstand",
|
||||||
|
"override_field_label_enable_min_periods_peak": "Mindestzahl erreichen",
|
||||||
|
"override_field_label_min_periods_peak": "Mindestperioden",
|
||||||
|
"override_field_label_relaxation_attempts_peak": "Lockerungsversuche"
|
||||||
},
|
},
|
||||||
"config_subentries": {
|
"config_subentries": {
|
||||||
"home": {
|
"home": {
|
||||||
|
|
@ -136,6 +152,7 @@
|
||||||
"general_settings": "⚙️ Allgemeine Einstellungen",
|
"general_settings": "⚙️ Allgemeine Einstellungen",
|
||||||
"display_settings": "💱 Währungsanzeige",
|
"display_settings": "💱 Währungsanzeige",
|
||||||
"current_interval_price_rating": "📊 Preisbewertung",
|
"current_interval_price_rating": "📊 Preisbewertung",
|
||||||
|
"price_level": "🏷️ Preisniveau",
|
||||||
"volatility": "💨 Preis-Volatilität",
|
"volatility": "💨 Preis-Volatilität",
|
||||||
"best_price": "💚 Bestpreis",
|
"best_price": "💚 Bestpreis",
|
||||||
"peak_price": "🔴 Spitzenpreis",
|
"peak_price": "🔴 Spitzenpreis",
|
||||||
|
|
@ -170,21 +187,25 @@
|
||||||
"submit": "↩ Speichern & Zurück"
|
"submit": "↩ Speichern & Zurück"
|
||||||
},
|
},
|
||||||
"current_interval_price_rating": {
|
"current_interval_price_rating": {
|
||||||
"title": "📊 Preisbewertungs-Schwellenwerte",
|
"title": "📊 Preisbewertungs-Einstellungen",
|
||||||
"description": "**Konfiguriere Schwellenwerte für Preisbewertungsstufen (niedrig/normal/hoch) basierend auf dem Vergleich mit dem nachlaufenden 24-Stunden-Durchschnitt.**",
|
"description": "**Konfiguriere Schwellenwerte und Stabilisierung für Preisbewertungsstufen (niedrig/normal/hoch) basierend auf dem Vergleich mit dem nachlaufenden 24-Stunden-Durchschnitt.**{entity_warning}",
|
||||||
"data": {
|
"data": {
|
||||||
"price_rating_threshold_low": "Niedrig-Schwelle",
|
"price_rating_threshold_low": "Niedrig-Schwelle",
|
||||||
"price_rating_threshold_high": "Hoch-Schwelle"
|
"price_rating_threshold_high": "Hoch-Schwelle",
|
||||||
|
"price_rating_hysteresis": "Hysterese",
|
||||||
|
"price_rating_gap_tolerance": "Lücken-Toleranz"
|
||||||
},
|
},
|
||||||
"data_description": {
|
"data_description": {
|
||||||
"price_rating_threshold_low": "Prozentwert, um wie viel der aktuelle Preis unter dem nachlaufenden 24-Stunden-Durchschnitt liegen muss, damit er als 'niedrig' bewertet wird. Beispiel: 5 bedeutet mindestens 5% unter Durchschnitt. Sensoren mit dieser Bewertung zeigen günstige Zeitfenster an. Standard: 5%",
|
"price_rating_threshold_low": "Prozentwert, um wie viel der aktuelle Preis unter dem nachlaufenden 24-Stunden-Durchschnitt liegen muss, damit er als 'niedrig' bewertet wird. Beispiel: -10 bedeutet mindestens 10% unter Durchschnitt. Sensoren mit dieser Bewertung zeigen günstige Zeitfenster an. Standard: -10%",
|
||||||
"price_rating_threshold_high": "Prozentwert, um wie viel der aktuelle Preis über dem nachlaufenden 24-Stunden-Durchschnitt liegen muss, damit er als 'hoch' bewertet wird. Beispiel: 10 bedeutet mindestens 10% über Durchschnitt. Sensoren mit dieser Bewertung warnen vor teuren Zeitfenstern. Standard: 10%"
|
"price_rating_threshold_high": "Prozentwert, um wie viel der aktuelle Preis über dem nachlaufenden 24-Stunden-Durchschnitt liegen muss, damit er als 'hoch' bewertet wird. Beispiel: 10 bedeutet mindestens 10% über Durchschnitt. Sensoren mit dieser Bewertung warnen vor teuren Zeitfenstern. Standard: 10%",
|
||||||
|
"price_rating_hysteresis": "Prozentband um die Schwellenwerte zur Vermeidung schneller Zustandswechsel. Wenn die Bewertung bereits NIEDRIG ist, muss der Preis über (Schwelle + Hysterese) steigen, um zu NORMAL zu wechseln. Ebenso muss bei HOCH der Preis unter (Schwelle - Hysterese) fallen, um den Zustand zu verlassen. Dies sorgt für Stabilität bei Automationen, die auf Bewertungsänderungen reagieren. Auf 0 setzen zum Deaktivieren. Standard: 2%",
|
||||||
|
"price_rating_gap_tolerance": "Maximale Anzahl aufeinanderfolgender Intervalle, die 'geglättet' werden können, wenn sie sich von den umgebenden Bewertungen unterscheiden. Kleine isolierte Bewertungsänderungen werden in den dominanten Nachbarblock integriert. Dies sorgt für Stabilität bei Automationen, indem kurze Bewertungsspitzen keine unnötigen Aktionen auslösen. Beispiel: 1 bedeutet, dass ein einzelnes 'normal'-Intervall umgeben von 'hoch'-Intervallen zu 'hoch' korrigiert wird. Auf 0 setzen zum Deaktivieren. Standard: 1"
|
||||||
},
|
},
|
||||||
"submit": "↩ Speichern & Zurück"
|
"submit": "↩ Speichern & Zurück"
|
||||||
},
|
},
|
||||||
"best_price": {
|
"best_price": {
|
||||||
"title": "💚 Bestpreis-Zeitraum Einstellungen",
|
"title": "💚 Bestpreis-Zeitraum Einstellungen",
|
||||||
"description": "**Konfiguration für den Bestpreis-Zeitraum mit den niedrigsten Strompreisen.**\n\n---",
|
"description": "**Konfiguration für den Bestpreis-Zeitraum mit den niedrigsten Strompreisen.**{entity_warning}{override_warning}\n\n---",
|
||||||
"sections": {
|
"sections": {
|
||||||
"period_settings": {
|
"period_settings": {
|
||||||
"name": "Zeitraumdauer & Preisniveaus",
|
"name": "Zeitraumdauer & Preisniveaus",
|
||||||
|
|
@ -231,7 +252,7 @@
|
||||||
},
|
},
|
||||||
"peak_price": {
|
"peak_price": {
|
||||||
"title": "🔴 Spitzenpreis-Zeitraum Einstellungen",
|
"title": "🔴 Spitzenpreis-Zeitraum Einstellungen",
|
||||||
"description": "**Konfiguration für den Spitzenpreis-Zeitraum mit den höchsten Strompreisen.**\n\n---",
|
"description": "**Konfiguration für den Spitzenpreis-Zeitraum mit den höchsten Strompreisen.**{entity_warning}{override_warning}\n\n---",
|
||||||
"sections": {
|
"sections": {
|
||||||
"period_settings": {
|
"period_settings": {
|
||||||
"name": "Zeitraum-Einstellungen",
|
"name": "Zeitraum-Einstellungen",
|
||||||
|
|
@ -278,20 +299,24 @@
|
||||||
},
|
},
|
||||||
"price_trend": {
|
"price_trend": {
|
||||||
"title": "📈 Preistrend-Schwellenwerte",
|
"title": "📈 Preistrend-Schwellenwerte",
|
||||||
"description": "**Konfiguriere Schwellenwerte für Preistrend-Sensoren. Diese Sensoren vergleichen den aktuellen Preis mit dem Durchschnitt der nächsten N Stunden, um festzustellen, ob die Preise steigen, fallen oder stabil sind.**",
|
"description": "**Konfiguriere Schwellenwerte für Preistrend-Sensoren.** Diese Sensoren vergleichen den aktuellen Preis mit dem Durchschnitt der nächsten N Stunden, um festzustellen, ob die Preise steigen, fallen oder stabil sind.\n\n**5-Stufen-Skala:** Nutzt stark_fallend (-2), fallend (-1), stabil (0), steigend (+1), stark_steigend (+2) für Automations-Vergleiche über das trend_value Attribut.{entity_warning}",
|
||||||
"data": {
|
"data": {
|
||||||
"price_trend_threshold_rising": "Steigend-Schwelle",
|
"price_trend_threshold_rising": "Steigend-Schwelle",
|
||||||
"price_trend_threshold_falling": "Fallend-Schwelle"
|
"price_trend_threshold_strongly_rising": "Stark steigend-Schwelle",
|
||||||
|
"price_trend_threshold_falling": "Fallend-Schwelle",
|
||||||
|
"price_trend_threshold_strongly_falling": "Stark fallend-Schwelle"
|
||||||
},
|
},
|
||||||
"data_description": {
|
"data_description": {
|
||||||
"price_trend_threshold_rising": "Prozentwert, um wie viel der Durchschnitt der nächsten N Stunden über dem aktuellen Preis liegen muss, damit der Trend als 'steigend' gilt. Beispiel: 5 bedeutet Durchschnitt ist mindestens 5% höher → Preise werden steigen. Typische Werte: 5-15%. Standard: 5%",
|
"price_trend_threshold_rising": "Prozentwert, um wie viel der Durchschnitt der nächsten N Stunden über dem aktuellen Preis liegen muss, damit der Trend als 'steigend' gilt. Beispiel: 3 bedeutet Durchschnitt ist mindestens 3% höher → Preise werden steigen. Typische Werte: 3-10%. Standard: 3%",
|
||||||
"price_trend_threshold_falling": "Prozentwert (negativ), um wie viel der Durchschnitt der nächsten N Stunden unter dem aktuellen Preis liegen muss, damit der Trend als 'fallend' gilt. Beispiel: -5 bedeutet Durchschnitt ist mindestens 5% niedriger → Preise werden fallen. Typische Werte: -5 bis -15%. Standard: -5%"
|
"price_trend_threshold_strongly_rising": "Prozentwert für 'stark steigend'-Trend. Muss höher sein als die steigend-Schwelle. Beispiel: 6 bedeutet Durchschnitt ist mindestens 6% höher → Preise werden deutlich steigen. Typische Werte: 6-15%. Standard: 6%",
|
||||||
|
"price_trend_threshold_falling": "Prozentwert (negativ), um wie viel der Durchschnitt der nächsten N Stunden unter dem aktuellen Preis liegen muss, damit der Trend als 'fallend' gilt. Beispiel: -3 bedeutet Durchschnitt ist mindestens 3% niedriger → Preise werden fallen. Typische Werte: -3 bis -10%. Standard: -3%",
|
||||||
|
"price_trend_threshold_strongly_falling": "Prozentwert (negativ) für 'stark fallend'-Trend. Muss niedriger (negativer) sein als die fallend-Schwelle. Beispiel: -6 bedeutet Durchschnitt ist mindestens 6% niedriger → Preise werden deutlich fallen. Typische Werte: -6 bis -15%. Standard: -6%"
|
||||||
},
|
},
|
||||||
"submit": "↩ Speichern & Zurück"
|
"submit": "↩ Speichern & Zurück"
|
||||||
},
|
},
|
||||||
"volatility": {
|
"volatility": {
|
||||||
"title": "💨 Volatilität Schwellenwerte",
|
"title": "💨 Volatilität Schwellenwerte",
|
||||||
"description": "**Konfiguriere Schwellenwerte für die Volatilitätsklassifizierung.** Volatilität misst relative Preisschwankungen anhand des Variationskoeffizienten (VK = Standardabweichung / Durchschnitt × 100%). Diese Schwellenwerte sind Prozentwerte, die für alle Preisniveaus funktionieren.\n\nVerwendet von:\n• Volatilitätssensoren (Klassifizierung)\n• Trend-Sensoren (adaptive Schwellenanpassung: <moderat = empfindlicher, ≥hoch = weniger empfindlich)",
|
"description": "**Konfiguriere Schwellenwerte für die Volatilitätsklassifizierung.** Volatilität misst relative Preisschwankungen anhand des Variationskoeffizienten (VK = Standardabweichung / Durchschnitt × 100%). Diese Schwellenwerte sind Prozentwerte, die für alle Preisniveaus funktionieren.\n\nVerwendet von:\n• Volatilitätssensoren (Klassifizierung)\n• Trend-Sensoren (adaptive Schwellenanpassung: <moderat = empfindlicher, ≥hoch = weniger empfindlich){entity_warning}",
|
||||||
"data": {
|
"data": {
|
||||||
"volatility_threshold_moderate": "Moderat-Schwelle",
|
"volatility_threshold_moderate": "Moderat-Schwelle",
|
||||||
"volatility_threshold_high": "Hoch-Schwelle",
|
"volatility_threshold_high": "Hoch-Schwelle",
|
||||||
|
|
@ -306,7 +331,7 @@
|
||||||
},
|
},
|
||||||
"chart_data_export": {
|
"chart_data_export": {
|
||||||
"title": "📊 Chart Data Export Sensor",
|
"title": "📊 Chart Data Export Sensor",
|
||||||
"description": "Der Chart Data Export Sensor stellt Preisdaten als Sensor-Attribute zur Verfügung.\n\n⚠️ **Hinweis:** Dieser Sensor ist ein Legacy-Feature für Kompatibilität mit älteren Tools.\n\n**Für neue Setups empfohlen:** Nutze den `tibber_prices.get_chartdata` **Service direkt** - er ist flexibler, effizienter und der moderne Home Assistant-Ansatz.\n\n**Wann dieser Sensor sinnvoll ist:**\n\n✅ Dein Dashboard-Tool kann **nur** Attribute lesen (keine Service-Aufrufe)\n✅ Du brauchst statische Daten, die automatisch aktualisiert werden\n❌ **Nicht für Automationen:** Nutze dort direkt `tibber_prices.get_chartdata` - flexibler und effizienter!\n\n---\n\n**Sensor aktivieren:**\n\n1. Öffne **Einstellungen → Geräte & Dienste → Tibber Prices**\n2. Wähle dein Home → Finde **'Chart Data Export'** (Diagnose-Bereich)\n3. **Aktiviere den Sensor** (standardmäßig deaktiviert)\n\n**Konfiguration (optional):**\n\nStandardeinstellung funktioniert sofort (heute+morgen, 15-Minuten-Intervalle, reine Preise).\n\nFür Anpassungen füge in **`configuration.yaml`** ein:\n\n```yaml\ntibber_prices:\n chart_export:\n day:\n - today\n - tomorrow\n include_level: true\n include_rating_level: true\n```\n\n**Alle Parameter:** Siehe `tibber_prices.get_chartdata` Service-Dokumentation",
|
"description": "Der Chart Data Export Sensor stellt Preisdaten als Sensor-Attribute zur Verfügung.\n\n⚠️ **Hinweis:** Dieser Sensor ist ein Legacy-Feature für Kompatibilität mit älteren Tools.\n\n**Für neue Setups empfohlen:** Nutze den `tibber_prices.get_chartdata` **Service direkt** - er ist flexibler, effizienter und der moderne Home Assistant-Ansatz.\n\n**Wann dieser Sensor sinnvoll ist:**\n\n✅ Dein Dashboard-Tool kann **nur** Attribute lesen (keine Service-Aufrufe)\n✅ Du brauchst statische Daten, die automatisch aktualisiert werden\n❌ **Nicht für Automationen:** Nutze dort direkt `tibber_prices.get_chartdata` - flexibler und effizienter!\n\n---\n\n{sensor_status_info}",
|
||||||
"submit": "↩ Ok & Zurück"
|
"submit": "↩ Ok & Zurück"
|
||||||
},
|
},
|
||||||
"reset_to_defaults": {
|
"reset_to_defaults": {
|
||||||
|
|
@ -316,6 +341,17 @@
|
||||||
"confirm_reset": "Ja, alles auf Werkseinstellungen zurücksetzen"
|
"confirm_reset": "Ja, alles auf Werkseinstellungen zurücksetzen"
|
||||||
},
|
},
|
||||||
"submit": "Jetzt zurücksetzen"
|
"submit": "Jetzt zurücksetzen"
|
||||||
|
},
|
||||||
|
"price_level": {
|
||||||
|
"title": "🏷️ Preisniveau-Einstellungen (von Tibber API)",
|
||||||
|
"description": "**Konfiguriere die Stabilisierung für Tibbers Preisniveau-Klassifizierung (sehr günstig/günstig/normal/teuer/sehr teuer).**\n\nTibbers API liefert ein Preisniveau-Feld für jedes Intervall. Diese Einstellung glättet kurze Schwankungen, um Instabilität in Automatisierungen zu verhindern.{entity_warning}",
|
||||||
|
"data": {
|
||||||
|
"price_level_gap_tolerance": "Gap-Toleranz"
|
||||||
|
},
|
||||||
|
"data_description": {
|
||||||
|
"price_level_gap_tolerance": "Maximale Anzahl aufeinanderfolgender Intervalle, die 'geglättet' werden können, wenn sie von umgebenden Preisniveaus abweichen. Kleine isolierte Niveauänderungen werden mit dem dominanten Nachbarblock zusammengeführt. Beispiel: 1 bedeutet, dass ein einzelnes 'normal'-Intervall, umgeben von 'günstig'-Intervallen, zu 'günstig' korrigiert wird. Auf 0 setzen zum Deaktivieren. Standard: 1"
|
||||||
|
},
|
||||||
|
"submit": "↩ Speichern & Zurück"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"error": {
|
"error": {
|
||||||
|
|
@ -340,7 +376,11 @@
|
||||||
"invalid_volatility_threshold_very_high": "Sehr hohe Volatilitätsschwelle muss zwischen 35% und 80% liegen",
|
"invalid_volatility_threshold_very_high": "Sehr hohe Volatilitätsschwelle muss zwischen 35% und 80% liegen",
|
||||||
"invalid_volatility_thresholds": "Schwellenwerte müssen aufsteigend sein: moderat < hoch < sehr hoch",
|
"invalid_volatility_thresholds": "Schwellenwerte müssen aufsteigend sein: moderat < hoch < sehr hoch",
|
||||||
"invalid_price_trend_rising": "Steigender Trendschwellenwert muss zwischen 1% und 50% liegen",
|
"invalid_price_trend_rising": "Steigender Trendschwellenwert muss zwischen 1% und 50% liegen",
|
||||||
"invalid_price_trend_falling": "Fallender Trendschwellenwert muss zwischen -50% und -1% liegen"
|
"invalid_price_trend_falling": "Fallender Trendschwellenwert muss zwischen -50% und -1% liegen",
|
||||||
|
"invalid_price_trend_strongly_rising": "Stark steigender Trendschwellenwert muss zwischen 2% und 100% liegen",
|
||||||
|
"invalid_price_trend_strongly_falling": "Stark fallender Trendschwellenwert muss zwischen -100% und -2% liegen",
|
||||||
|
"invalid_trend_strongly_rising_less_than_rising": "Stark steigend-Schwelle muss größer als steigend-Schwelle sein",
|
||||||
|
"invalid_trend_strongly_falling_greater_than_falling": "Stark fallend-Schwelle muss kleiner (negativer) als fallend-Schwelle sein"
|
||||||
},
|
},
|
||||||
"abort": {
|
"abort": {
|
||||||
"entry_not_found": "Tibber Konfigurationseintrag nicht gefunden.",
|
"entry_not_found": "Tibber Konfigurationseintrag nicht gefunden.",
|
||||||
|
|
@ -576,73 +616,91 @@
|
||||||
"price_trend_1h": {
|
"price_trend_1h": {
|
||||||
"name": "Preistrend (1h)",
|
"name": "Preistrend (1h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Stark steigend",
|
||||||
"rising": "Steigend",
|
"rising": "Steigend",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallend",
|
"falling": "Fallend",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Stark fallend"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_2h": {
|
"price_trend_2h": {
|
||||||
"name": "Preistrend (2h)",
|
"name": "Preistrend (2h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Stark steigend",
|
||||||
"rising": "Steigend",
|
"rising": "Steigend",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallend",
|
"falling": "Fallend",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Stark fallend"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_3h": {
|
"price_trend_3h": {
|
||||||
"name": "Preistrend (3h)",
|
"name": "Preistrend (3h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Stark steigend",
|
||||||
"rising": "Steigend",
|
"rising": "Steigend",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallend",
|
"falling": "Fallend",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Stark fallend"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_4h": {
|
"price_trend_4h": {
|
||||||
"name": "Preistrend (4h)",
|
"name": "Preistrend (4h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Stark steigend",
|
||||||
"rising": "Steigend",
|
"rising": "Steigend",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallend",
|
"falling": "Fallend",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Stark fallend"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_5h": {
|
"price_trend_5h": {
|
||||||
"name": "Preistrend (5h)",
|
"name": "Preistrend (5h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Stark steigend",
|
||||||
"rising": "Steigend",
|
"rising": "Steigend",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallend",
|
"falling": "Fallend",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Stark fallend"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_6h": {
|
"price_trend_6h": {
|
||||||
"name": "Preistrend (6h)",
|
"name": "Preistrend (6h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Stark steigend",
|
||||||
"rising": "Steigend",
|
"rising": "Steigend",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallend",
|
"falling": "Fallend",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Stark fallend"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_8h": {
|
"price_trend_8h": {
|
||||||
"name": "Preistrend (8h)",
|
"name": "Preistrend (8h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Stark steigend",
|
||||||
"rising": "Steigend",
|
"rising": "Steigend",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallend",
|
"falling": "Fallend",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Stark fallend"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_12h": {
|
"price_trend_12h": {
|
||||||
"name": "Preistrend (12h)",
|
"name": "Preistrend (12h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Stark steigend",
|
||||||
"rising": "Steigend",
|
"rising": "Steigend",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallend",
|
"falling": "Fallend",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Stark fallend"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"current_price_trend": {
|
"current_price_trend": {
|
||||||
"name": "Aktueller Preistrend",
|
"name": "Aktueller Preistrend",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Stark steigend",
|
||||||
"rising": "Steigend",
|
"rising": "Steigend",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallend",
|
"falling": "Fallend",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Stark fallend"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"next_price_trend_change": {
|
"next_price_trend_change": {
|
||||||
|
|
@ -844,6 +902,52 @@
|
||||||
"realtime_consumption_enabled": {
|
"realtime_consumption_enabled": {
|
||||||
"name": "Echtzeitverbrauch aktiviert"
|
"name": "Echtzeitverbrauch aktiviert"
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
"number": {
|
||||||
|
"best_price_flex_override": {
|
||||||
|
"name": "Bestpreis: Flexibilität"
|
||||||
|
},
|
||||||
|
"best_price_min_distance_override": {
|
||||||
|
"name": "Bestpreis: Mindestabstand"
|
||||||
|
},
|
||||||
|
"best_price_min_period_length_override": {
|
||||||
|
"name": "Bestpreis: Mindestperiodenlänge"
|
||||||
|
},
|
||||||
|
"best_price_min_periods_override": {
|
||||||
|
"name": "Bestpreis: Mindestperioden"
|
||||||
|
},
|
||||||
|
"best_price_relaxation_attempts_override": {
|
||||||
|
"name": "Bestpreis: Lockerungsversuche"
|
||||||
|
},
|
||||||
|
"best_price_gap_count_override": {
|
||||||
|
"name": "Bestpreis: Lückentoleranz"
|
||||||
|
},
|
||||||
|
"peak_price_flex_override": {
|
||||||
|
"name": "Spitzenpreis: Flexibilität"
|
||||||
|
},
|
||||||
|
"peak_price_min_distance_override": {
|
||||||
|
"name": "Spitzenpreis: Mindestabstand"
|
||||||
|
},
|
||||||
|
"peak_price_min_period_length_override": {
|
||||||
|
"name": "Spitzenpreis: Mindestperiodenlänge"
|
||||||
|
},
|
||||||
|
"peak_price_min_periods_override": {
|
||||||
|
"name": "Spitzenpreis: Mindestperioden"
|
||||||
|
},
|
||||||
|
"peak_price_relaxation_attempts_override": {
|
||||||
|
"name": "Spitzenpreis: Lockerungsversuche"
|
||||||
|
},
|
||||||
|
"peak_price_gap_count_override": {
|
||||||
|
"name": "Spitzenpreis: Lückentoleranz"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"switch": {
|
||||||
|
"best_price_enable_relaxation_override": {
|
||||||
|
"name": "Bestpreis: Mindestanzahl erreichen"
|
||||||
|
},
|
||||||
|
"peak_price_enable_relaxation_override": {
|
||||||
|
"name": "Spitzenpreis: Mindestanzahl erreichen"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"issues": {
|
"issues": {
|
||||||
|
|
@ -906,6 +1010,14 @@
|
||||||
"highlight_best_price": {
|
"highlight_best_price": {
|
||||||
"name": "Bestpreis-Zeiträume hervorheben",
|
"name": "Bestpreis-Zeiträume hervorheben",
|
||||||
"description": "Füge eine halbtransparente grüne Überlagerung hinzu, um die Bestpreis-Zeiträume im Diagramm hervorzuheben. Dies erleichtert die visuelle Identifizierung der optimalen Zeiten für den Energieverbrauch."
|
"description": "Füge eine halbtransparente grüne Überlagerung hinzu, um die Bestpreis-Zeiträume im Diagramm hervorzuheben. Dies erleichtert die visuelle Identifizierung der optimalen Zeiten für den Energieverbrauch."
|
||||||
|
},
|
||||||
|
"highlight_peak_price": {
|
||||||
|
"name": "Spitzenpreis-Zeiträume hervorheben",
|
||||||
|
"description": "Füge eine halbtransparente rote Überlagerung hinzu, um die Spitzenpreis-Zeiträume im Diagramm hervorzuheben. Dies erleichtert die visuelle Identifizierung der Zeiten, in denen Energie am teuersten ist."
|
||||||
|
},
|
||||||
|
"resolution": {
|
||||||
|
"name": "Auflösung",
|
||||||
|
"description": "Zeitauflösung für die Diagrammdaten. 'interval' (Standard): Originale 15-Minuten-Intervalle (96 Punkte pro Tag). 'hourly': Aggregierte Stundenwerte mit einem rollierenden 60-Minuten-Fenster (24 Punkte pro Tag) für ein übersichtlicheres Diagramm."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|
|
||||||
|
|
@ -11,14 +11,14 @@
|
||||||
},
|
},
|
||||||
"new_token": {
|
"new_token": {
|
||||||
"title": "Enter API Token",
|
"title": "Enter API Token",
|
||||||
"description": "Set up Tibber Price Information & Ratings.\n\nTo generate an API access token, visit https://developer.tibber.com.",
|
"description": "Set up Tibber Price Information & Ratings.\n\nTo generate an API access token, visit [{tibber_url}]({tibber_url}).",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API access token"
|
"access_token": "API access token"
|
||||||
},
|
},
|
||||||
"submit": "Validate Token"
|
"submit": "Validate Token"
|
||||||
},
|
},
|
||||||
"user": {
|
"user": {
|
||||||
"description": "Set up Tibber Price Information & Ratings.\n\nTo generate an API access token, visit https://developer.tibber.com.",
|
"description": "Set up Tibber Price Information & Ratings.\n\nTo generate an API access token, visit [{tibber_url}]({tibber_url}).",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API access token"
|
"access_token": "API access token"
|
||||||
},
|
},
|
||||||
|
|
@ -42,7 +42,7 @@
|
||||||
},
|
},
|
||||||
"reauth_confirm": {
|
"reauth_confirm": {
|
||||||
"title": "Reauthenticate Tibber Price Integration",
|
"title": "Reauthenticate Tibber Price Integration",
|
||||||
"description": "The access token for Tibber is no longer valid. Please enter a new API access token to continue using this integration.\n\nTo generate a new API access token, visit https://developer.tibber.com.",
|
"description": "The access token for Tibber is no longer valid. Please enter a new API access token to continue using this integration.\n\nTo generate a new API access token, visit [{tibber_url}]({tibber_url}).",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API access token"
|
"access_token": "API access token"
|
||||||
},
|
},
|
||||||
|
|
@ -77,7 +77,23 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"common": {
|
"common": {
|
||||||
"step_progress": "{step_num} / {total_steps}"
|
"step_progress": "{step_num} / {total_steps}",
|
||||||
|
"override_warning_template": "⚠️ {fields} controlled by config entity",
|
||||||
|
"override_warning_and": "and",
|
||||||
|
"override_field_label_best_price_min_period_length": "Minimum Period Length",
|
||||||
|
"override_field_label_best_price_max_level_gap_count": "Gap Tolerance",
|
||||||
|
"override_field_label_best_price_flex": "Flexibility",
|
||||||
|
"override_field_label_best_price_min_distance_from_avg": "Minimum Distance",
|
||||||
|
"override_field_label_enable_min_periods_best": "Achieve Minimum Count",
|
||||||
|
"override_field_label_min_periods_best": "Minimum Periods",
|
||||||
|
"override_field_label_relaxation_attempts_best": "Relaxation Attempts",
|
||||||
|
"override_field_label_peak_price_min_period_length": "Minimum Period Length",
|
||||||
|
"override_field_label_peak_price_max_level_gap_count": "Gap Tolerance",
|
||||||
|
"override_field_label_peak_price_flex": "Flexibility",
|
||||||
|
"override_field_label_peak_price_min_distance_from_avg": "Minimum Distance",
|
||||||
|
"override_field_label_enable_min_periods_peak": "Achieve Minimum Count",
|
||||||
|
"override_field_label_min_periods_peak": "Minimum Periods",
|
||||||
|
"override_field_label_relaxation_attempts_peak": "Relaxation Attempts"
|
||||||
},
|
},
|
||||||
"config_subentries": {
|
"config_subentries": {
|
||||||
"home": {
|
"home": {
|
||||||
|
|
@ -136,6 +152,7 @@
|
||||||
"general_settings": "⚙️ General Settings",
|
"general_settings": "⚙️ General Settings",
|
||||||
"display_settings": "💱 Currency Display",
|
"display_settings": "💱 Currency Display",
|
||||||
"current_interval_price_rating": "📊 Price Rating",
|
"current_interval_price_rating": "📊 Price Rating",
|
||||||
|
"price_level": "🏷️ Price Level",
|
||||||
"volatility": "💨 Price Volatility",
|
"volatility": "💨 Price Volatility",
|
||||||
"best_price": "💚 Best Price Period",
|
"best_price": "💚 Best Price Period",
|
||||||
"peak_price": "🔴 Peak Price Period",
|
"peak_price": "🔴 Peak Price Period",
|
||||||
|
|
@ -170,21 +187,36 @@
|
||||||
"submit": "↩ Save & Back"
|
"submit": "↩ Save & Back"
|
||||||
},
|
},
|
||||||
"current_interval_price_rating": {
|
"current_interval_price_rating": {
|
||||||
"title": "📊 Price Rating Thresholds",
|
"title": "📊 Price Rating Settings",
|
||||||
"description": "**Configure thresholds for price rating levels (low/normal/high) based on comparison with trailing 24-hour average.**",
|
"description": "**Configure thresholds and stabilization for price rating levels (low/normal/high) based on comparison with trailing 24-hour average.**{entity_warning}",
|
||||||
"data": {
|
"data": {
|
||||||
"price_rating_threshold_low": "Low Threshold",
|
"price_rating_threshold_low": "Low Threshold",
|
||||||
"price_rating_threshold_high": "High Threshold"
|
"price_rating_threshold_high": "High Threshold",
|
||||||
|
"price_rating_hysteresis": "Hysteresis",
|
||||||
|
"price_rating_gap_tolerance": "Gap Tolerance"
|
||||||
},
|
},
|
||||||
"data_description": {
|
"data_description": {
|
||||||
"price_rating_threshold_low": "Percentage below the trailing 24-hour average that the current price must be to qualify as 'low' rating. Example: 5 means at least 5% below average. Sensors with this rating indicate favorable time windows. Default: 5%",
|
"price_rating_threshold_low": "Percentage below the trailing 24-hour average that the current price must be to qualify as 'low' rating. Example: -10 means at least 10% below average. Sensors with this rating indicate favorable time windows. Default: -10%",
|
||||||
"price_rating_threshold_high": "Percentage above the trailing 24-hour average that the current price must be to qualify as 'high' rating. Example: 10 means at least 10% above average. Sensors with this rating warn about expensive time windows. Default: 10%"
|
"price_rating_threshold_high": "Percentage above the trailing 24-hour average that the current price must be to qualify as 'high' rating. Example: 10 means at least 10% above average. Sensors with this rating warn about expensive time windows. Default: 10%",
|
||||||
|
"price_rating_hysteresis": "Percentage band around thresholds to prevent rapid state changes. When the rating is already LOW, the price must rise above (threshold + hysteresis) to switch to NORMAL. Similarly, HIGH requires the price to fall below (threshold - hysteresis) to leave. This provides stability for automations that react to rating changes. Set to 0 to disable. Default: 2%",
|
||||||
|
"price_rating_gap_tolerance": "Maximum number of consecutive intervals that can be 'smoothed out' if they differ from surrounding ratings. Small isolated rating changes are merged into the dominant neighboring block. This provides stability for automations by preventing brief rating spikes from triggering unnecessary actions. Example: 1 means a single 'normal' interval surrounded by 'high' intervals gets corrected to 'high'. Set to 0 to disable. Default: 1"
|
||||||
|
},
|
||||||
|
"submit": "↩ Save & Back"
|
||||||
|
},
|
||||||
|
"price_level": {
|
||||||
|
"title": "🏷️ Price Level Settings",
|
||||||
|
"description": "**Configure stabilization for Tibber's price level classification (very cheap/cheap/normal/expensive/very expensive).**\n\nTibber's API provides a price level field for each interval. This setting smooths out brief fluctuations to prevent automation instability.{entity_warning}",
|
||||||
|
"data": {
|
||||||
|
"price_level_gap_tolerance": "Gap Tolerance"
|
||||||
|
},
|
||||||
|
"data_description": {
|
||||||
|
"price_level_gap_tolerance": "Maximum number of consecutive intervals that can be 'smoothed out' if they differ from surrounding price levels. Small isolated level changes are merged into the dominant neighboring block. Example: 1 means a single 'normal' interval surrounded by 'cheap' intervals gets corrected to 'cheap'. Set to 0 to disable. Default: 1"
|
||||||
},
|
},
|
||||||
"submit": "↩ Save & Back"
|
"submit": "↩ Save & Back"
|
||||||
},
|
},
|
||||||
"best_price": {
|
"best_price": {
|
||||||
"title": "💚 Best Price Period Settings",
|
"title": "💚 Best Price Period Settings",
|
||||||
"description": "**Configure settings for the Best Price Period binary sensor. This sensor is active during periods with the lowest electricity prices.**\n\n---",
|
"description": "**Configure settings for the Best Price Period binary sensor. This sensor is active during periods with the lowest electricity prices.**{entity_warning}{override_warning}\n\n---",
|
||||||
"sections": {
|
"sections": {
|
||||||
"period_settings": {
|
"period_settings": {
|
||||||
"name": "Period Duration & Levels",
|
"name": "Period Duration & Levels",
|
||||||
|
|
@ -231,7 +263,7 @@
|
||||||
},
|
},
|
||||||
"peak_price": {
|
"peak_price": {
|
||||||
"title": "🔴 Peak Price Period Settings",
|
"title": "🔴 Peak Price Period Settings",
|
||||||
"description": "**Configure settings for the Peak Price Period binary sensor. This sensor is active during periods with the highest electricity prices.**\n\n---",
|
"description": "**Configure settings for the Peak Price Period binary sensor. This sensor is active during periods with the highest electricity prices.**{entity_warning}{override_warning}\n\n---",
|
||||||
"sections": {
|
"sections": {
|
||||||
"period_settings": {
|
"period_settings": {
|
||||||
"name": "Period Settings",
|
"name": "Period Settings",
|
||||||
|
|
@ -278,20 +310,24 @@
|
||||||
},
|
},
|
||||||
"price_trend": {
|
"price_trend": {
|
||||||
"title": "📈 Price Trend Thresholds",
|
"title": "📈 Price Trend Thresholds",
|
||||||
"description": "**Configure thresholds for price trend sensors. These sensors compare current price with the average of the next N hours to determine if prices are rising, falling, or stable.**",
|
"description": "**Configure thresholds for price trend sensors.** These sensors compare current price with the average of the next N hours to determine if prices are rising, falling, or stable.\n\n**5-Level Scale:** Uses strongly_falling (-2), falling (-1), stable (0), rising (+1), strongly_rising (+2) for automation comparisons via trend_value attribute.{entity_warning}",
|
||||||
"data": {
|
"data": {
|
||||||
"price_trend_threshold_rising": "Rising Threshold",
|
"price_trend_threshold_rising": "Rising Threshold",
|
||||||
"price_trend_threshold_falling": "Falling Threshold"
|
"price_trend_threshold_strongly_rising": "Strongly Rising Threshold",
|
||||||
|
"price_trend_threshold_falling": "Falling Threshold",
|
||||||
|
"price_trend_threshold_strongly_falling": "Strongly Falling Threshold"
|
||||||
},
|
},
|
||||||
"data_description": {
|
"data_description": {
|
||||||
"price_trend_threshold_rising": "Percentage that the average of the next N hours must be above the current price to qualify as 'rising' trend. Example: 5 means average is at least 5% higher → prices will rise. Typical values: 5-15%. Default: 5%",
|
"price_trend_threshold_rising": "Percentage that the average of the next N hours must be above the current price to qualify as 'rising' trend. Example: 3 means average is at least 3% higher → prices will rise. Typical values: 3-10%. Default: 3%",
|
||||||
"price_trend_threshold_falling": "Percentage (negative) that the average of the next N hours must be below the current price to qualify as 'falling' trend. Example: -5 means average is at least 5% lower → prices will fall. Typical values: -5 to -15%. Default: -5%"
|
"price_trend_threshold_strongly_rising": "Percentage for 'strongly rising' trend. Must be higher than rising threshold. Example: 6 means average is at least 6% higher → prices will rise significantly. Typical values: 6-15%. Default: 6%",
|
||||||
|
"price_trend_threshold_falling": "Percentage (negative) that the average of the next N hours must be below the current price to qualify as 'falling' trend. Example: -3 means average is at least 3% lower → prices will fall. Typical values: -3 to -10%. Default: -3%",
|
||||||
|
"price_trend_threshold_strongly_falling": "Percentage (negative) for 'strongly falling' trend. Must be lower (more negative) than falling threshold. Example: -6 means average is at least 6% lower → prices will fall significantly. Typical values: -6 to -15%. Default: -6%"
|
||||||
},
|
},
|
||||||
"submit": "↩ Save & Back"
|
"submit": "↩ Save & Back"
|
||||||
},
|
},
|
||||||
"volatility": {
|
"volatility": {
|
||||||
"title": "💨 Price Volatility Thresholds",
|
"title": "💨 Price Volatility Thresholds",
|
||||||
"description": "**Configure thresholds for volatility classification.** Volatility measures relative price variation using the coefficient of variation (CV = standard deviation / mean × 100%). These thresholds are percentage values that work across all price levels.\n\nUsed by:\n• Volatility sensors (classification)\n• Trend sensors (adaptive threshold adjustment: <moderate = more sensitive, ≥high = less sensitive)",
|
"description": "**Configure thresholds for volatility classification.** Volatility measures relative price variation using the coefficient of variation (CV = standard deviation / mean × 100%). These thresholds are percentage values that work across all price levels.\n\nUsed by:\n• Volatility sensors (classification)\n• Trend sensors (adaptive threshold adjustment: <moderate = more sensitive, ≥high = less sensitive){entity_warning}",
|
||||||
"data": {
|
"data": {
|
||||||
"volatility_threshold_moderate": "Moderate Threshold",
|
"volatility_threshold_moderate": "Moderate Threshold",
|
||||||
"volatility_threshold_high": "High Threshold",
|
"volatility_threshold_high": "High Threshold",
|
||||||
|
|
@ -306,7 +342,7 @@
|
||||||
},
|
},
|
||||||
"chart_data_export": {
|
"chart_data_export": {
|
||||||
"title": "📊 Chart Data Export Sensor",
|
"title": "📊 Chart Data Export Sensor",
|
||||||
"description": "The Chart Data Export Sensor provides price data as sensor attributes.\n\n⚠️ **Note:** This sensor is a legacy feature for compatibility with older tools.\n\n**Recommended for new setups:** Use the `tibber_prices.get_chartdata` **service directly** - it's more flexible, efficient, and the modern Home Assistant approach.\n\n**When this sensor makes sense:**\n\n✅ Your dashboard tool can **only** read attributes (no service calls)\n✅ You need static data that updates automatically\n❌ **Not for automations:** Use `tibber_prices.get_chartdata` directly there - more flexible and efficient!\n\n---\n\n**Enable the sensor:**\n\n1. Open **Settings → Devices & Services → Tibber Prices**\n2. Select your home → Find **'Chart Data Export'** (Diagnostic section)\n3. **Enable the sensor** (disabled by default)\n\n**Configuration (optional):**\n\nDefault settings work out-of-the-box (today+tomorrow, 15-minute intervals, prices only).\n\nFor customization, add to **`configuration.yaml`**:\n\n```yaml\ntibber_prices:\n chart_export:\n day:\n - today\n - tomorrow\n include_level: true\n include_rating_level: true\n```\n\n**All parameters:** See `tibber_prices.get_chartdata` service documentation",
|
"description": "The Chart Data Export Sensor provides price data as sensor attributes.\n\n⚠️ **Note:** This sensor is a legacy feature for compatibility with older tools.\n\n**Recommended for new setups:** Use the `tibber_prices.get_chartdata` **service directly** - it's more flexible, efficient, and the modern Home Assistant approach.\n\n**When this sensor makes sense:**\n\n✅ Your dashboard tool can **only** read attributes (no service calls)\n✅ You need static data that updates automatically\n❌ **Not for automations:** Use `tibber_prices.get_chartdata` directly there - more flexible and efficient!\n\n---\n\n{sensor_status_info}",
|
||||||
"submit": "↩ Ok & Back"
|
"submit": "↩ Ok & Back"
|
||||||
},
|
},
|
||||||
"reset_to_defaults": {
|
"reset_to_defaults": {
|
||||||
|
|
@ -340,7 +376,11 @@
|
||||||
"invalid_volatility_threshold_very_high": "Very high volatility threshold must be between 35% and 80%",
|
"invalid_volatility_threshold_very_high": "Very high volatility threshold must be between 35% and 80%",
|
||||||
"invalid_volatility_thresholds": "Thresholds must be in ascending order: moderate < high < very high",
|
"invalid_volatility_thresholds": "Thresholds must be in ascending order: moderate < high < very high",
|
||||||
"invalid_price_trend_rising": "Rising trend threshold must be between 1% and 50%",
|
"invalid_price_trend_rising": "Rising trend threshold must be between 1% and 50%",
|
||||||
"invalid_price_trend_falling": "Falling trend threshold must be between -50% and -1%"
|
"invalid_price_trend_falling": "Falling trend threshold must be between -50% and -1%",
|
||||||
|
"invalid_price_trend_strongly_rising": "Strongly rising trend threshold must be between 2% and 100%",
|
||||||
|
"invalid_price_trend_strongly_falling": "Strongly falling trend threshold must be between -100% and -2%",
|
||||||
|
"invalid_trend_strongly_rising_less_than_rising": "Strongly rising threshold must be greater than rising threshold",
|
||||||
|
"invalid_trend_strongly_falling_greater_than_falling": "Strongly falling threshold must be less (more negative) than falling threshold"
|
||||||
},
|
},
|
||||||
"abort": {
|
"abort": {
|
||||||
"entry_not_found": "Tibber configuration entry not found.",
|
"entry_not_found": "Tibber configuration entry not found.",
|
||||||
|
|
@ -576,73 +616,91 @@
|
||||||
"price_trend_1h": {
|
"price_trend_1h": {
|
||||||
"name": "Price Trend (1h)",
|
"name": "Price Trend (1h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Strongly Rising",
|
||||||
"rising": "Rising",
|
"rising": "Rising",
|
||||||
|
"stable": "Stable",
|
||||||
"falling": "Falling",
|
"falling": "Falling",
|
||||||
"stable": "Stable"
|
"strongly_falling": "Strongly Falling"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_2h": {
|
"price_trend_2h": {
|
||||||
"name": "Price Trend (2h)",
|
"name": "Price Trend (2h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Strongly Rising",
|
||||||
"rising": "Rising",
|
"rising": "Rising",
|
||||||
|
"stable": "Stable",
|
||||||
"falling": "Falling",
|
"falling": "Falling",
|
||||||
"stable": "Stable"
|
"strongly_falling": "Strongly Falling"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_3h": {
|
"price_trend_3h": {
|
||||||
"name": "Price Trend (3h)",
|
"name": "Price Trend (3h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Strongly Rising",
|
||||||
"rising": "Rising",
|
"rising": "Rising",
|
||||||
|
"stable": "Stable",
|
||||||
"falling": "Falling",
|
"falling": "Falling",
|
||||||
"stable": "Stable"
|
"strongly_falling": "Strongly Falling"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_4h": {
|
"price_trend_4h": {
|
||||||
"name": "Price Trend (4h)",
|
"name": "Price Trend (4h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Strongly Rising",
|
||||||
"rising": "Rising",
|
"rising": "Rising",
|
||||||
|
"stable": "Stable",
|
||||||
"falling": "Falling",
|
"falling": "Falling",
|
||||||
"stable": "Stable"
|
"strongly_falling": "Strongly Falling"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_5h": {
|
"price_trend_5h": {
|
||||||
"name": "Price Trend (5h)",
|
"name": "Price Trend (5h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Strongly Rising",
|
||||||
"rising": "Rising",
|
"rising": "Rising",
|
||||||
|
"stable": "Stable",
|
||||||
"falling": "Falling",
|
"falling": "Falling",
|
||||||
"stable": "Stable"
|
"strongly_falling": "Strongly Falling"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_6h": {
|
"price_trend_6h": {
|
||||||
"name": "Price Trend (6h)",
|
"name": "Price Trend (6h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Strongly Rising",
|
||||||
"rising": "Rising",
|
"rising": "Rising",
|
||||||
|
"stable": "Stable",
|
||||||
"falling": "Falling",
|
"falling": "Falling",
|
||||||
"stable": "Stable"
|
"strongly_falling": "Strongly Falling"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_8h": {
|
"price_trend_8h": {
|
||||||
"name": "Price Trend (8h)",
|
"name": "Price Trend (8h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Strongly Rising",
|
||||||
"rising": "Rising",
|
"rising": "Rising",
|
||||||
|
"stable": "Stable",
|
||||||
"falling": "Falling",
|
"falling": "Falling",
|
||||||
"stable": "Stable"
|
"strongly_falling": "Strongly Falling"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_12h": {
|
"price_trend_12h": {
|
||||||
"name": "Price Trend (12h)",
|
"name": "Price Trend (12h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Strongly Rising",
|
||||||
"rising": "Rising",
|
"rising": "Rising",
|
||||||
|
"stable": "Stable",
|
||||||
"falling": "Falling",
|
"falling": "Falling",
|
||||||
"stable": "Stable"
|
"strongly_falling": "Strongly Falling"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"current_price_trend": {
|
"current_price_trend": {
|
||||||
"name": "Current Price Trend",
|
"name": "Current Price Trend",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Strongly Rising",
|
||||||
"rising": "Rising",
|
"rising": "Rising",
|
||||||
|
"stable": "Stable",
|
||||||
"falling": "Falling",
|
"falling": "Falling",
|
||||||
"stable": "Stable"
|
"strongly_falling": "Strongly Falling"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"next_price_trend_change": {
|
"next_price_trend_change": {
|
||||||
|
|
@ -844,6 +902,52 @@
|
||||||
"realtime_consumption_enabled": {
|
"realtime_consumption_enabled": {
|
||||||
"name": "Realtime Consumption Enabled"
|
"name": "Realtime Consumption Enabled"
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
"number": {
|
||||||
|
"best_price_flex_override": {
|
||||||
|
"name": "Best Price: Flexibility"
|
||||||
|
},
|
||||||
|
"best_price_min_distance_override": {
|
||||||
|
"name": "Best Price: Minimum Distance"
|
||||||
|
},
|
||||||
|
"best_price_min_period_length_override": {
|
||||||
|
"name": "Best Price: Minimum Period Length"
|
||||||
|
},
|
||||||
|
"best_price_min_periods_override": {
|
||||||
|
"name": "Best Price: Minimum Periods"
|
||||||
|
},
|
||||||
|
"best_price_relaxation_attempts_override": {
|
||||||
|
"name": "Best Price: Relaxation Attempts"
|
||||||
|
},
|
||||||
|
"best_price_gap_count_override": {
|
||||||
|
"name": "Best Price: Gap Tolerance"
|
||||||
|
},
|
||||||
|
"peak_price_flex_override": {
|
||||||
|
"name": "Peak Price: Flexibility"
|
||||||
|
},
|
||||||
|
"peak_price_min_distance_override": {
|
||||||
|
"name": "Peak Price: Minimum Distance"
|
||||||
|
},
|
||||||
|
"peak_price_min_period_length_override": {
|
||||||
|
"name": "Peak Price: Minimum Period Length"
|
||||||
|
},
|
||||||
|
"peak_price_min_periods_override": {
|
||||||
|
"name": "Peak Price: Minimum Periods"
|
||||||
|
},
|
||||||
|
"peak_price_relaxation_attempts_override": {
|
||||||
|
"name": "Peak Price: Relaxation Attempts"
|
||||||
|
},
|
||||||
|
"peak_price_gap_count_override": {
|
||||||
|
"name": "Peak Price: Gap Tolerance"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"switch": {
|
||||||
|
"best_price_enable_relaxation_override": {
|
||||||
|
"name": "Best Price: Achieve Minimum Count"
|
||||||
|
},
|
||||||
|
"peak_price_enable_relaxation_override": {
|
||||||
|
"name": "Peak Price: Achieve Minimum Count"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"issues": {
|
"issues": {
|
||||||
|
|
@ -906,6 +1010,14 @@
|
||||||
"highlight_best_price": {
|
"highlight_best_price": {
|
||||||
"name": "Highlight Best Price Periods",
|
"name": "Highlight Best Price Periods",
|
||||||
"description": "Add a semi-transparent green overlay to highlight the best price periods on the chart. This makes it easy to visually identify the optimal times for energy consumption."
|
"description": "Add a semi-transparent green overlay to highlight the best price periods on the chart. This makes it easy to visually identify the optimal times for energy consumption."
|
||||||
|
},
|
||||||
|
"highlight_peak_price": {
|
||||||
|
"name": "Highlight Peak Price Periods",
|
||||||
|
"description": "Add a semi-transparent red overlay to highlight the peak price periods on the chart. This makes it easy to visually identify times when energy is most expensive."
|
||||||
|
},
|
||||||
|
"resolution": {
|
||||||
|
"name": "Resolution",
|
||||||
|
"description": "Time resolution for the chart data. 'interval' (default): Original 15-minute intervals (96 points per day). 'hourly': Aggregated hourly values using a rolling 60-minute window (24 points per day) for a cleaner, less cluttered chart."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|
@ -1050,6 +1162,16 @@
|
||||||
"description": "The config entry ID for the Tibber integration."
|
"description": "The config entry ID for the Tibber integration."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
"debug_clear_tomorrow": {
|
||||||
|
"name": "Debug: Clear Tomorrow Data",
|
||||||
|
"description": "DEBUG/TESTING: Removes tomorrow's price data from the interval pool cache. Use this to test the tomorrow data refresh cycle without waiting for the next day. After calling this service, the lifecycle sensor will show 'searching_tomorrow' (after 13:00) and the next Timer #1 cycle will fetch new data from the API.",
|
||||||
|
"fields": {
|
||||||
|
"entry_id": {
|
||||||
|
"name": "Entry ID",
|
||||||
|
"description": "Optional config entry ID. If not provided, uses the first available entry."
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"selector": {
|
"selector": {
|
||||||
|
|
|
||||||
|
|
@ -11,14 +11,14 @@
|
||||||
},
|
},
|
||||||
"new_token": {
|
"new_token": {
|
||||||
"title": "Skriv inn API-token",
|
"title": "Skriv inn API-token",
|
||||||
"description": "Sett opp Tibber Prisinformasjon & Vurderinger.\n\nFor å generere et API-tilgangstoken, besøk https://developer.tibber.com.",
|
"description": "Sett opp Tibber Prisinformasjon & Vurderinger.\n\nFor å generere et API-tilgangstoken, besøk [{tibber_url}]({tibber_url}).",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API-tilgangstoken"
|
"access_token": "API-tilgangstoken"
|
||||||
},
|
},
|
||||||
"submit": "Valider token"
|
"submit": "Valider token"
|
||||||
},
|
},
|
||||||
"user": {
|
"user": {
|
||||||
"description": "Sett opp Tibber Prisinformasjon & Vurderinger.\n\nFor å generere et API-tilgangstoken, besøk https://developer.tibber.com.",
|
"description": "Sett opp Tibber Prisinformasjon & Vurderinger.\n\nFor å generere et API-tilgangstoken, besøk [{tibber_url}]({tibber_url}).",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API-tilgangstoken"
|
"access_token": "API-tilgangstoken"
|
||||||
},
|
},
|
||||||
|
|
@ -42,7 +42,7 @@
|
||||||
},
|
},
|
||||||
"reauth_confirm": {
|
"reauth_confirm": {
|
||||||
"title": "Autentiser Tibber Prisintegrasjonen på nytt",
|
"title": "Autentiser Tibber Prisintegrasjonen på nytt",
|
||||||
"description": "Tilgangstokenet for Tibber er ikke lenger gyldig. Vennligst oppgi et nytt API-tilgangstoken for å fortsette å bruke denne integrasjonen.\n\nFor å generere et nytt API-tilgangstoken, besøk https://developer.tibber.com.",
|
"description": "Tilgangstokenet for Tibber er ikke lenger gyldig. Vennligst oppgi et nytt API-tilgangstoken for å fortsette å bruke denne integrasjonen.\n\nFor å generere et nytt API-tilgangstoken, besøk [{tibber_url}]({tibber_url}).",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API-tilgangstoken"
|
"access_token": "API-tilgangstoken"
|
||||||
},
|
},
|
||||||
|
|
@ -77,7 +77,23 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"common": {
|
"common": {
|
||||||
"step_progress": "{step_num} / {total_steps}"
|
"step_progress": "{step_num} / {total_steps}",
|
||||||
|
"override_warning_template": "⚠️ {fields} styres av konfigurasjons-entitet",
|
||||||
|
"override_warning_and": "og",
|
||||||
|
"override_field_label_best_price_min_period_length": "Minste periodelengde",
|
||||||
|
"override_field_label_best_price_max_level_gap_count": "Gaptoleranse",
|
||||||
|
"override_field_label_best_price_flex": "Fleksibilitet",
|
||||||
|
"override_field_label_best_price_min_distance_from_avg": "Minimumsavstand",
|
||||||
|
"override_field_label_enable_min_periods_best": "Oppnå minimum antall",
|
||||||
|
"override_field_label_min_periods_best": "Minimumperioder",
|
||||||
|
"override_field_label_relaxation_attempts_best": "Avslapningsforsøk",
|
||||||
|
"override_field_label_peak_price_min_period_length": "Minste periodelengde",
|
||||||
|
"override_field_label_peak_price_max_level_gap_count": "Gaptoleranse",
|
||||||
|
"override_field_label_peak_price_flex": "Fleksibilitet",
|
||||||
|
"override_field_label_peak_price_min_distance_from_avg": "Minimumsavstand",
|
||||||
|
"override_field_label_enable_min_periods_peak": "Oppnå minimum antall",
|
||||||
|
"override_field_label_min_periods_peak": "Minimumperioder",
|
||||||
|
"override_field_label_relaxation_attempts_peak": "Avslapningsforsøk"
|
||||||
},
|
},
|
||||||
"config_subentries": {
|
"config_subentries": {
|
||||||
"home": {
|
"home": {
|
||||||
|
|
@ -136,6 +152,7 @@
|
||||||
"general_settings": "⚙️ Generelle innstillinger",
|
"general_settings": "⚙️ Generelle innstillinger",
|
||||||
"display_settings": "💱 Valutavisning",
|
"display_settings": "💱 Valutavisning",
|
||||||
"current_interval_price_rating": "📊 Prisvurdering",
|
"current_interval_price_rating": "📊 Prisvurdering",
|
||||||
|
"price_level": "🏷️ Prisnivå",
|
||||||
"volatility": "💨 Prisvolatilitet",
|
"volatility": "💨 Prisvolatilitet",
|
||||||
"best_price": "💚 Beste prisperiode",
|
"best_price": "💚 Beste prisperiode",
|
||||||
"peak_price": "🔴 Toppprisperiode",
|
"peak_price": "🔴 Toppprisperiode",
|
||||||
|
|
@ -170,21 +187,25 @@
|
||||||
"submit": "↩ Lagre & tilbake"
|
"submit": "↩ Lagre & tilbake"
|
||||||
},
|
},
|
||||||
"current_interval_price_rating": {
|
"current_interval_price_rating": {
|
||||||
"title": "📊 Prisvurderings-terskler",
|
"title": "📊 Prisvurderingsinnstillinger",
|
||||||
"description": "**Konfigurer terskler for prisvurderingsnivåer (lav/normal/høy) basert på sammenligning med etterfølgende 24-timers gjennomsnitt.**",
|
"description": "**Konfigurer terskler og stabilisering for prisvurderingsnivåer (lav/normal/høy) basert på sammenligning med etterfølgende 24-timers gjennomsnitt.**{entity_warning}",
|
||||||
"data": {
|
"data": {
|
||||||
"price_rating_threshold_low": "Lav-terskel",
|
"price_rating_threshold_low": "Lav-terskel",
|
||||||
"price_rating_threshold_high": "Høy-terskel"
|
"price_rating_threshold_high": "Høy-terskel",
|
||||||
|
"price_rating_hysteresis": "Hysterese",
|
||||||
|
"price_rating_gap_tolerance": "Gap-toleranse"
|
||||||
},
|
},
|
||||||
"data_description": {
|
"data_description": {
|
||||||
"price_rating_threshold_low": "Prosentverdi for hvor mye gjeldende pris må være under det etterfølgende 24-timers gjennomsnittet for å kvalifisere som 'lav' vurdering. Eksempel: 5 betyr minst 5% under gjennomsnitt. Sensorer med denne vurderingen indikerer gunstige tidsvinduer. Standard: 5%",
|
"price_rating_threshold_low": "Prosentverdi for hvor mye gjeldende pris må være under det etterfølgende 24-timers gjennomsnittet for å kvalifisere som 'lav' vurdering. Eksempel: -10 betyr minst 10% under gjennomsnitt. Sensorer med denne vurderingen indikerer gunstige tidsvinduer. Standard: -10%",
|
||||||
"price_rating_threshold_high": "Prosentverdi for hvor mye gjeldende pris må være over det etterfølgende 24-timers gjennomsnittet for å kvalifisere som 'høy' vurdering. Eksempel: 10 betyr minst 10% over gjennomsnitt. Sensorer med denne vurderingen advarer om dyre tidsvinduer. Standard: 10%"
|
"price_rating_threshold_high": "Prosentverdi for hvor mye gjeldende pris må være over det etterfølgende 24-timers gjennomsnittet for å kvalifisere som 'høy' vurdering. Eksempel: 10 betyr minst 10% over gjennomsnitt. Sensorer med denne vurderingen advarer om dyre tidsvinduer. Standard: 10%",
|
||||||
|
"price_rating_hysteresis": "Prosentbånd rundt terskler for å unngå raske tilstandsendringer. Når vurderingen allerede er LAV, må prisen stige over (terskel + hysterese) for å bytte til NORMAL. Tilsvarende krever HØY at prisen faller under (terskel - hysterese) for å forlate tilstanden. Dette gir stabilitet for automatiseringer som reagerer på vurderingsendringer. Sett til 0 for å deaktivere. Standard: 2%",
|
||||||
|
"price_rating_gap_tolerance": "Maksimalt antall påfølgende intervaller som kan 'jevnes ut' hvis de avviker fra omkringliggende vurderinger. Små isolerte vurderingsendringer slås sammen med den dominerende nabogruppen. Dette gir stabilitet for automatiseringer ved å forhindre at korte vurderingstopper utløser unødvendige handlinger. Eksempel: 1 betyr at et enkelt 'normal'-intervall omgitt av 'høy'-intervaller korrigeres til 'høy'. Sett til 0 for å deaktivere. Standard: 1"
|
||||||
},
|
},
|
||||||
"submit": "↩ Lagre & tilbake"
|
"submit": "↩ Lagre & tilbake"
|
||||||
},
|
},
|
||||||
"best_price": {
|
"best_price": {
|
||||||
"title": "💚 Beste Prisperiode Innstillinger",
|
"title": "💚 Beste Prisperiode Innstillinger",
|
||||||
"description": "**Konfigurer innstillinger for Beste Prisperiode binærsensor. Denne sensoren er aktiv i perioder med de laveste strømprisene.**\n\n---",
|
"description": "**Konfigurer innstillinger for Beste Prisperiode binærsensor. Denne sensoren er aktiv i perioder med de laveste strømprisene.**{entity_warning}{override_warning}\n\n---",
|
||||||
"sections": {
|
"sections": {
|
||||||
"period_settings": {
|
"period_settings": {
|
||||||
"name": "Periodeinnstillinger",
|
"name": "Periodeinnstillinger",
|
||||||
|
|
@ -231,7 +252,7 @@
|
||||||
},
|
},
|
||||||
"peak_price": {
|
"peak_price": {
|
||||||
"title": "🔴 Toppprisperiode Innstillinger",
|
"title": "🔴 Toppprisperiode Innstillinger",
|
||||||
"description": "**Konfigurer innstillinger for Toppprisperiode binærsensor. Denne sensoren er aktiv i perioder med de høyeste strømprisene.**\n\n---",
|
"description": "**Konfigurer innstillinger for Toppprisperiode binærsensor. Denne sensoren er aktiv i perioder med de høyeste strømprisene.**{entity_warning}{override_warning}\n\n---",
|
||||||
"sections": {
|
"sections": {
|
||||||
"period_settings": {
|
"period_settings": {
|
||||||
"name": "Periodeinnstillinger",
|
"name": "Periodeinnstillinger",
|
||||||
|
|
@ -278,35 +299,39 @@
|
||||||
},
|
},
|
||||||
"price_trend": {
|
"price_trend": {
|
||||||
"title": "📈 Pristrendterskler",
|
"title": "📈 Pristrendterskler",
|
||||||
"description": "**Konfigurer terskler for pristrendsensorer. Disse sensorene sammenligner nåværende pris med gjennomsnittet av de neste N timene for å bestemme om prisene stiger, faller eller er stabile.**",
|
"description": "**Konfigurer terskler for pristrendsensorer. Disse sensorene sammenligner nåværende pris med gjennomsnittet av de neste N timene for å bestemme om prisene stiger sterkt, stiger, er stabile, faller eller faller sterkt.**{entity_warning}",
|
||||||
"data": {
|
"data": {
|
||||||
"price_trend_threshold_rising": "Stigende terskel",
|
"price_trend_threshold_rising": "Stigende terskel",
|
||||||
"price_trend_threshold_falling": "Fallende terskel"
|
"price_trend_threshold_strongly_rising": "Sterkt stigende terskel",
|
||||||
|
"price_trend_threshold_falling": "Fallende terskel",
|
||||||
|
"price_trend_threshold_strongly_falling": "Sterkt fallende terskel"
|
||||||
},
|
},
|
||||||
"data_description": {
|
"data_description": {
|
||||||
"price_trend_threshold_rising": "Prosentverdi som gjennomsnittet av de neste N timene må være over den nåværende prisen for å kvalifisere som 'stigende' trend. Eksempel: 5 betyr gjennomsnittet er minst 5% høyere → prisene vil stige. Typiske verdier: 5-15%. Standard: 5%",
|
"price_trend_threshold_rising": "Prosentverdi som gjennomsnittet av de neste N timene må være over den nåværende prisen for å kvalifisere som 'stigende' trend. Eksempel: 3 betyr gjennomsnittet er minst 3% høyere → prisene vil stige. Typiske verdier: 3-10%. Standard: 3%",
|
||||||
"price_trend_threshold_falling": "Prosentverdi (negativ) som gjennomsnittet av de neste N timene må være under den nåværende prisen for å kvalifisere som 'synkende' trend. Eksempel: -5 betyr gjennomsnittet er minst 5% lavere → prisene vil falle. Typiske verdier: -5 til -15%. Standard: -5%"
|
"price_trend_threshold_strongly_rising": "Prosentverdi som gjennomsnittet av de neste N timene må være over den nåværende prisen for å kvalifisere som 'sterkt stigende' trend. Må være høyere enn stigende terskel. Typiske verdier: 6-20%. Standard: 6%",
|
||||||
|
"price_trend_threshold_falling": "Prosentverdi (negativ) som gjennomsnittet av de neste N timene må være under den nåværende prisen for å kvalifisere som 'synkende' trend. Eksempel: -3 betyr gjennomsnittet er minst 3% lavere → prisene vil falle. Typiske verdier: -3 til -10%. Standard: -3%",
|
||||||
|
"price_trend_threshold_strongly_falling": "Prosentverdi (negativ) som gjennomsnittet av de neste N timene må være under den nåværende prisen for å kvalifisere som 'sterkt synkende' trend. Må være lavere (mer negativ) enn fallende terskel. Typiske verdier: -6 til -20%. Standard: -6%"
|
||||||
},
|
},
|
||||||
"submit": "↩ Lagre & tilbake"
|
"submit": "↩ Lagre & tilbake"
|
||||||
},
|
},
|
||||||
"volatility": {
|
"volatility": {
|
||||||
"title": "💨 Volatilitets-terskler",
|
"title": "💨 Volatilitets-terskler",
|
||||||
"description": "**Konfigurer terskler for volatilitetsklassifisering.** Volatilitet måler relativ prisvariation ved hjelp av variasjonskoeffisienten (VK = standardavvik / gjennomsnitt × 100%). Disse tersklene er prosentverdier som fungerer på tvers av alle prisnivåer.\n\nBrukes av:\n• Volatilitetssensorer (klassifisering)\n• Trendsensorer (adaptiv terskel justering: <moderat = mer følsom, ≥høy = mindre følsom)",
|
"description": "**Konfigurer terskler for volatilitetsklassifisering.** Volatilitet måler relativ prisvariation ved hjelp av variasjonskoeffisienten (VK = standardavvik / gjennomsnitt × 100%). Disse tersklene er prosentverdier som fungerer på tvers av alle prisnivåer.\n\nBrukes av:\n• Volatilitetssensorer (klassifisering)\n• Trendsensorer (adaptiv terskel justering: <moderat = mer følsom, ≥høy = mindre følsom){entity_warning}",
|
||||||
"data": {
|
"data": {
|
||||||
"volatility_threshold_moderate": "Moderat terskel",
|
"volatility_threshold_moderate": "Moderat terskel",
|
||||||
"volatility_threshold_high": "Høy terskel",
|
"volatility_threshold_high": "Høy terskel",
|
||||||
"volatility_threshold_very_high": "Veldig høy terskel"
|
"volatility_threshold_very_high": "Veldig høy terskel"
|
||||||
},
|
},
|
||||||
"data_description": {
|
"data_description": {
|
||||||
"volatility_threshold_moderate": "Grenseverdi for standardavvik (% av gjennomsnitt) for å klassifisere prisvariasjonen som 'moderat'. Eksempel: 10 betyr standardavvik ≥ 10% av gjennomsnitt. Dette indikerer økt prisustabilitet. Standard: 10%",
|
"volatility_threshold_moderate": "Variasjonskoeffisient (VK) der prisene anses som 'moderat volatile'. VK = (standardavvik / gjennomsnitt) × 100%. Eksempel: 15 betyr prissvingninger på ±15% rundt gjennomsnittet. Sensorer viser denne klassifiseringen, trendsensorer blir mer følsomme. Standard: 15%",
|
||||||
"volatility_threshold_high": "Grenseverdi for standardavvik (% av gjennomsnitt) for å klassifisere prisvariasjonen som 'høy'. Eksempel: 20 betyr standardavvik ≥ 20% av gjennomsnitt. Dette indikerer betydelige prissvingninger. Standard: 20%",
|
"volatility_threshold_high": "Variasjonskoeffisient (VK) der prisene anses som 'svært volatile'. Eksempel: 30 betyr prissvingninger på ±30% rundt gjennomsnittet. Større prishopp forventes, trendsensorer blir mindre følsomme. Standard: 30%",
|
||||||
"volatility_threshold_very_high": "Grenseverdi for standardavvik (% av gjennomsnitt) for å klassifisere prisvariasjonen som 'veldig høy'. Eksempel: 30 betyr standardavvik ≥ 30% av gjennomsnitt. Dette indikerer ekstrem prisustabilitet. Standard: 30%"
|
"volatility_threshold_very_high": "Variasjonskoeffisient (VK) der prisene anses som 'veldig svært volatile'. Eksempel: 50 betyr ekstreme prissvingninger på ±50% rundt gjennomsnittet. På slike dager er sterke pristoppsannsynlige. Standard: 50%"
|
||||||
},
|
},
|
||||||
"submit": "↩ Lagre & tilbake"
|
"submit": "↩ Lagre & tilbake"
|
||||||
},
|
},
|
||||||
"chart_data_export": {
|
"chart_data_export": {
|
||||||
"title": "📊 Diagram-dataeksport Sensor",
|
"title": "📊 Diagram-dataeksport Sensor",
|
||||||
"description": "Diagram-dataeksport-sensoren gir prisdata som sensorattributter.\n\n⚠️ **Merk:** Denne sensoren er en legacy-funksjon for kompatibilitet med eldre verktøy.\n\n**Anbefalt for nye oppsett:** Bruk `tibber_prices.get_chartdata` **tjenesten direkte** - den er mer fleksibel, effektiv og den moderne Home Assistant-tilnærmingen.\n\n**Når denne sensoren gir mening:**\n\n✅ Dashboardverktøyet ditt kan **kun** lese attributter (ingen tjenestekall)\n✅ Du trenger statiske data som oppdateres automatisk\n❌ **Ikke for automatiseringer:** Bruk `tibber_prices.get_chartdata` direkte der - mer fleksibel og effektiv!\n\n---\n\n**Aktiver sensoren:**\n\n1. Åpne **Innstillinger → Enheter og tjenester → Tibber Prices**\n2. Velg ditt hjem → Finn **'Diagramdataeksport'** (Diagnostikk-seksjonen)\n3. **Aktiver sensoren** (deaktivert som standard)\n\n**Konfigurasjon (valgfritt):**\n\nStandardinnstillinger fungerer umiddelbart (i dag+i morgen, 15-minutters intervaller, bare priser).\n\nFor tilpasning, legg til i **`configuration.yaml`**:\n\n```yaml\ntibber_prices:\n chart_export:\n day:\n - today\n - tomorrow\n include_level: true\n include_rating_level: true\n```\n\n**Alle parametere:** Se `tibber_prices.get_chartdata` tjenestens dokumentasjon",
|
"description": "Diagram-dataeksport-sensoren gir prisdata som sensorattributter.\n\n⚠️ **Merk:** Denne sensoren er en legacy-funksjon for kompatibilitet med eldre verktøy.\n\n**Anbefalt for nye oppsett:** Bruk `tibber_prices.get_chartdata` **tjenesten direkte** - den er mer fleksibel, effektiv og den moderne Home Assistant-tilnærmingen.\n\n**Når denne sensoren gir mening:**\n\n✅ Dashboardverktøyet ditt kan **kun** lese attributter (ingen tjenestekall)\n✅ Du trenger statiske data som oppdateres automatisk\n❌ **Ikke for automatiseringer:** Bruk `tibber_prices.get_chartdata` direkte der - mer fleksibel og effektiv!\n\n---\n\n{sensor_status_info}",
|
||||||
"submit": "↩ Ok & tilbake"
|
"submit": "↩ Ok & tilbake"
|
||||||
},
|
},
|
||||||
"reset_to_defaults": {
|
"reset_to_defaults": {
|
||||||
|
|
@ -316,6 +341,17 @@
|
||||||
"confirm_reset": "Ja, tilbakestill alt til standard"
|
"confirm_reset": "Ja, tilbakestill alt til standard"
|
||||||
},
|
},
|
||||||
"submit": "Tilbakestill nå"
|
"submit": "Tilbakestill nå"
|
||||||
|
},
|
||||||
|
"price_level": {
|
||||||
|
"title": "🏷️ Prisnivå-innstillinger",
|
||||||
|
"description": "**Konfigurer stabilisering for Tibbers prisnivå-klassifisering (veldig billig/billig/normal/dyr/veldig dyr).**\n\nTibbers API gir et prisnivå-felt for hvert intervall. Denne innstillingen jevner ut korte svingninger for å forhindre ustabilitet i automatiseringer.{entity_warning}",
|
||||||
|
"data": {
|
||||||
|
"price_level_gap_tolerance": "Gap-toleranse"
|
||||||
|
},
|
||||||
|
"data_description": {
|
||||||
|
"price_level_gap_tolerance": "Maksimalt antall påfølgende intervaller som kan 'jevnes ut' hvis de avviker fra omkringliggende prisnivåer. Små isolerte nivåendringer slås sammen med den dominerende nabogruppen. Eksempel: 1 betyr at et enkelt 'normal'-intervall omgitt av 'billig'-intervaller korrigeres til 'billig'. Sett til 0 for å deaktivere. Standard: 1"
|
||||||
|
},
|
||||||
|
"submit": "↩ Lagre & tilbake"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"error": {
|
"error": {
|
||||||
|
|
@ -340,7 +376,11 @@
|
||||||
"invalid_volatility_threshold_very_high": "Svært høy volatilitetsgrense må være mellom 35% og 80%",
|
"invalid_volatility_threshold_very_high": "Svært høy volatilitetsgrense må være mellom 35% og 80%",
|
||||||
"invalid_volatility_thresholds": "Grensene må være i stigende rekkefølge: moderat < høy < svært høy",
|
"invalid_volatility_thresholds": "Grensene må være i stigende rekkefølge: moderat < høy < svært høy",
|
||||||
"invalid_price_trend_rising": "Stigende trendgrense må være mellom 1% og 50%",
|
"invalid_price_trend_rising": "Stigende trendgrense må være mellom 1% og 50%",
|
||||||
"invalid_price_trend_falling": "Fallende trendgrense må være mellom -50% og -1%"
|
"invalid_price_trend_falling": "Fallende trendgrense må være mellom -50% og -1%",
|
||||||
|
"invalid_price_trend_strongly_rising": "Sterkt stigende trendgrense må være mellom 2% og 100%",
|
||||||
|
"invalid_price_trend_strongly_falling": "Sterkt fallende trendgrense må være mellom -100% og -2%",
|
||||||
|
"invalid_trend_strongly_rising_less_than_rising": "Sterkt stigende-grense må være høyere enn stigende-grense",
|
||||||
|
"invalid_trend_strongly_falling_greater_than_falling": "Sterkt fallende-grense må være lavere (mer negativ) enn fallende-grense"
|
||||||
},
|
},
|
||||||
"abort": {
|
"abort": {
|
||||||
"entry_not_found": "Tibber-konfigurasjonsoppføring ikke funnet.",
|
"entry_not_found": "Tibber-konfigurasjonsoppføring ikke funnet.",
|
||||||
|
|
@ -576,73 +616,91 @@
|
||||||
"price_trend_1h": {
|
"price_trend_1h": {
|
||||||
"name": "Pristrend (1t)",
|
"name": "Pristrend (1t)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Sterkt stigende",
|
||||||
"rising": "Stigende",
|
"rising": "Stigende",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallende",
|
"falling": "Fallende",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Sterkt fallende"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_2h": {
|
"price_trend_2h": {
|
||||||
"name": "Pristrend (2t)",
|
"name": "Pristrend (2t)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Sterkt stigende",
|
||||||
"rising": "Stigende",
|
"rising": "Stigende",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallende",
|
"falling": "Fallende",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Sterkt fallende"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_3h": {
|
"price_trend_3h": {
|
||||||
"name": "Pristrend (3t)",
|
"name": "Pristrend (3t)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Sterkt stigende",
|
||||||
"rising": "Stigende",
|
"rising": "Stigende",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallende",
|
"falling": "Fallende",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Sterkt fallende"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_4h": {
|
"price_trend_4h": {
|
||||||
"name": "Pristrend (4t)",
|
"name": "Pristrend (4t)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Sterkt stigende",
|
||||||
"rising": "Stigende",
|
"rising": "Stigende",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallende",
|
"falling": "Fallende",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Sterkt fallende"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_5h": {
|
"price_trend_5h": {
|
||||||
"name": "Pristrend (5t)",
|
"name": "Pristrend (5t)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Sterkt stigende",
|
||||||
"rising": "Stigende",
|
"rising": "Stigende",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallende",
|
"falling": "Fallende",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Sterkt fallende"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_6h": {
|
"price_trend_6h": {
|
||||||
"name": "Pristrend (6t)",
|
"name": "Pristrend (6t)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Sterkt stigende",
|
||||||
"rising": "Stigende",
|
"rising": "Stigende",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallende",
|
"falling": "Fallende",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Sterkt fallende"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_8h": {
|
"price_trend_8h": {
|
||||||
"name": "Pristrend (8t)",
|
"name": "Pristrend (8t)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Sterkt stigende",
|
||||||
"rising": "Stigende",
|
"rising": "Stigende",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallende",
|
"falling": "Fallende",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Sterkt fallende"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_12h": {
|
"price_trend_12h": {
|
||||||
"name": "Pristrend (12t)",
|
"name": "Pristrend (12t)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Sterkt stigende",
|
||||||
"rising": "Stigende",
|
"rising": "Stigende",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallende",
|
"falling": "Fallende",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Sterkt fallende"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"current_price_trend": {
|
"current_price_trend": {
|
||||||
"name": "Nåværende pristrend",
|
"name": "Nåværende pristrend",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Sterkt stigende",
|
||||||
"rising": "Stigende",
|
"rising": "Stigende",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallende",
|
"falling": "Fallende",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Sterkt fallende"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"next_price_trend_change": {
|
"next_price_trend_change": {
|
||||||
|
|
@ -844,6 +902,52 @@
|
||||||
"realtime_consumption_enabled": {
|
"realtime_consumption_enabled": {
|
||||||
"name": "Sanntidsforbruk aktivert"
|
"name": "Sanntidsforbruk aktivert"
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
"number": {
|
||||||
|
"best_price_flex_override": {
|
||||||
|
"name": "Beste pris: Fleksibilitet"
|
||||||
|
},
|
||||||
|
"best_price_min_distance_override": {
|
||||||
|
"name": "Beste pris: Minimumsavstand"
|
||||||
|
},
|
||||||
|
"best_price_min_period_length_override": {
|
||||||
|
"name": "Beste pris: Minimum periodelengde"
|
||||||
|
},
|
||||||
|
"best_price_min_periods_override": {
|
||||||
|
"name": "Beste pris: Minimum perioder"
|
||||||
|
},
|
||||||
|
"best_price_relaxation_attempts_override": {
|
||||||
|
"name": "Beste pris: Lemping forsøk"
|
||||||
|
},
|
||||||
|
"best_price_gap_count_override": {
|
||||||
|
"name": "Beste pris: Gaptoleranse"
|
||||||
|
},
|
||||||
|
"peak_price_flex_override": {
|
||||||
|
"name": "Topppris: Fleksibilitet"
|
||||||
|
},
|
||||||
|
"peak_price_min_distance_override": {
|
||||||
|
"name": "Topppris: Minimumsavstand"
|
||||||
|
},
|
||||||
|
"peak_price_min_period_length_override": {
|
||||||
|
"name": "Topppris: Minimum periodelengde"
|
||||||
|
},
|
||||||
|
"peak_price_min_periods_override": {
|
||||||
|
"name": "Topppris: Minimum perioder"
|
||||||
|
},
|
||||||
|
"peak_price_relaxation_attempts_override": {
|
||||||
|
"name": "Topppris: Lemping forsøk"
|
||||||
|
},
|
||||||
|
"peak_price_gap_count_override": {
|
||||||
|
"name": "Topppris: Gaptoleranse"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"switch": {
|
||||||
|
"best_price_enable_relaxation_override": {
|
||||||
|
"name": "Beste pris: Oppnå minimumsantall"
|
||||||
|
},
|
||||||
|
"peak_price_enable_relaxation_override": {
|
||||||
|
"name": "Topppris: Oppnå minimumsantall"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"issues": {
|
"issues": {
|
||||||
|
|
@ -906,6 +1010,14 @@
|
||||||
"highlight_best_price": {
|
"highlight_best_price": {
|
||||||
"name": "Fremhev beste prisperioder",
|
"name": "Fremhev beste prisperioder",
|
||||||
"description": "Legg til et halvgjennomsiktig grønt overlegg for å fremheve de beste prisperiodene i diagrammet. Dette gjør det enkelt å visuelt identifisere de optimale tidene for energiforbruk."
|
"description": "Legg til et halvgjennomsiktig grønt overlegg for å fremheve de beste prisperiodene i diagrammet. Dette gjør det enkelt å visuelt identifisere de optimale tidene for energiforbruk."
|
||||||
|
},
|
||||||
|
"highlight_peak_price": {
|
||||||
|
"name": "Fremhev høyeste prisperioder",
|
||||||
|
"description": "Legg til et halvgjennomsiktig rødt overlegg for å fremheve de høyeste prisperiodene i diagrammet. Dette gjør det enkelt å visuelt identifisere tidene når energi er dyrest."
|
||||||
|
},
|
||||||
|
"resolution": {
|
||||||
|
"name": "Oppløsning",
|
||||||
|
"description": "Tidsoppløsning for diagramdata. 'interval' (standard): Opprinnelige 15-minutters intervaller (96 punkter per dag). 'hourly': Aggregerte timeverdier med et rullende 60-minutters vindu (24 punkter per dag) for et ryddigere og mindre rotete diagram."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|
|
||||||
|
|
@ -11,14 +11,14 @@
|
||||||
},
|
},
|
||||||
"new_token": {
|
"new_token": {
|
||||||
"title": "Voer API-Token In",
|
"title": "Voer API-Token In",
|
||||||
"description": "Stel Tibber Prijsinformatie & Beoordelingen in.\n\nOm een API-toegangstoken te genereren, bezoek https://developer.tibber.com.",
|
"description": "Stel Tibber Prijsinformatie & Beoordelingen in.\n\nOm een API-toegangstoken te genereren, bezoek [{tibber_url}]({tibber_url}).",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API-toegangstoken"
|
"access_token": "API-toegangstoken"
|
||||||
},
|
},
|
||||||
"submit": "Token valideren"
|
"submit": "Token valideren"
|
||||||
},
|
},
|
||||||
"user": {
|
"user": {
|
||||||
"description": "Stel Tibber Prijsinformatie & Beoordelingen in.\n\nOm een API-toegangstoken te genereren, bezoek https://developer.tibber.com.",
|
"description": "Stel Tibber Prijsinformatie & Beoordelingen in.\n\nOm een API-toegangstoken te genereren, bezoek [{tibber_url}]({tibber_url}).",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API-toegangstoken"
|
"access_token": "API-toegangstoken"
|
||||||
},
|
},
|
||||||
|
|
@ -42,7 +42,7 @@
|
||||||
},
|
},
|
||||||
"reauth_confirm": {
|
"reauth_confirm": {
|
||||||
"title": "Tibber Price Integratie Opnieuw Authenticeren",
|
"title": "Tibber Price Integratie Opnieuw Authenticeren",
|
||||||
"description": "Het toegangstoken voor Tibber is niet langer geldig. Voer een nieuw API-toegangstoken in om deze integratie te blijven gebruiken.\n\nOm een nieuw API-toegangstoken te genereren, bezoek https://developer.tibber.com.",
|
"description": "Het toegangstoken voor Tibber is niet langer geldig. Voer een nieuw API-toegangstoken in om deze integratie te blijven gebruiken.\n\nOm een nieuw API-toegangstoken te genereren, bezoek [{tibber_url}]({tibber_url}).",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API-toegangstoken"
|
"access_token": "API-toegangstoken"
|
||||||
},
|
},
|
||||||
|
|
@ -77,7 +77,23 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"common": {
|
"common": {
|
||||||
"step_progress": "{step_num} / {total_steps}"
|
"step_progress": "{step_num} / {total_steps}",
|
||||||
|
"override_warning_template": "⚠️ {fields} wordt beheerd door configuratie-entiteit",
|
||||||
|
"override_warning_and": "en",
|
||||||
|
"override_field_label_best_price_min_period_length": "Minimale periodelengte",
|
||||||
|
"override_field_label_best_price_max_level_gap_count": "Gaptolerantie",
|
||||||
|
"override_field_label_best_price_flex": "Flexibiliteit",
|
||||||
|
"override_field_label_best_price_min_distance_from_avg": "Minimale afstand",
|
||||||
|
"override_field_label_enable_min_periods_best": "Minimum aantal bereiken",
|
||||||
|
"override_field_label_min_periods_best": "Minimale periodes",
|
||||||
|
"override_field_label_relaxation_attempts_best": "Ontspanningspogingen",
|
||||||
|
"override_field_label_peak_price_min_period_length": "Minimale periodelengte",
|
||||||
|
"override_field_label_peak_price_max_level_gap_count": "Gaptolerantie",
|
||||||
|
"override_field_label_peak_price_flex": "Flexibiliteit",
|
||||||
|
"override_field_label_peak_price_min_distance_from_avg": "Minimale afstand",
|
||||||
|
"override_field_label_enable_min_periods_peak": "Minimum aantal bereiken",
|
||||||
|
"override_field_label_min_periods_peak": "Minimale periodes",
|
||||||
|
"override_field_label_relaxation_attempts_peak": "Ontspanningspogingen"
|
||||||
},
|
},
|
||||||
"config_subentries": {
|
"config_subentries": {
|
||||||
"home": {
|
"home": {
|
||||||
|
|
@ -136,6 +152,7 @@
|
||||||
"general_settings": "⚙️ Algemene Instellingen",
|
"general_settings": "⚙️ Algemene Instellingen",
|
||||||
"display_settings": "💱 Valuta Weergave",
|
"display_settings": "💱 Valuta Weergave",
|
||||||
"current_interval_price_rating": "📊 Prijsbeoordeling",
|
"current_interval_price_rating": "📊 Prijsbeoordeling",
|
||||||
|
"price_level": "🏷️ Prijsniveau",
|
||||||
"volatility": "💨 Prijsvolatiliteit",
|
"volatility": "💨 Prijsvolatiliteit",
|
||||||
"best_price": "💚 Beste Prijs Periode",
|
"best_price": "💚 Beste Prijs Periode",
|
||||||
"peak_price": "🔴 Piekprijs Periode",
|
"peak_price": "🔴 Piekprijs Periode",
|
||||||
|
|
@ -170,33 +187,37 @@
|
||||||
"submit": "↩ Opslaan & Terug"
|
"submit": "↩ Opslaan & Terug"
|
||||||
},
|
},
|
||||||
"current_interval_price_rating": {
|
"current_interval_price_rating": {
|
||||||
"title": "📊 Prijsbeoordeling Drempelwaarden",
|
"title": "📊 Instellingen Prijsbeoordeling",
|
||||||
"description": "**Configureer drempelwaarden voor prijsbeoordelingsniveaus (laag/normaal/hoog) gebaseerd op vergelijking met het voortschrijdende 24-uurs gemiddelde.**",
|
"description": "**Configureer drempelwaarden en stabilisatie voor prijsbeoordelingsniveaus (laag/normaal/hoog) gebaseerd op vergelijking met het voortschrijdende 24-uurs gemiddelde.**{entity_warning}",
|
||||||
"data": {
|
"data": {
|
||||||
"price_rating_threshold_low": "Lage Drempel",
|
"price_rating_threshold_low": "Lage Drempel",
|
||||||
"price_rating_threshold_high": "Hoge Drempel"
|
"price_rating_threshold_high": "Hoge Drempel",
|
||||||
|
"price_rating_hysteresis": "Hysterese",
|
||||||
|
"price_rating_gap_tolerance": "Gap Tolerantie"
|
||||||
},
|
},
|
||||||
"data_description": {
|
"data_description": {
|
||||||
"price_rating_threshold_low": "Percentage onder het voortschrijdende 24-uurs gemiddelde dat de huidige prijs moet zijn om te kwalificeren als 'laag' beoordelingsniveau. Voorbeeld: 5 betekent minimaal 5% onder gemiddelde. Sensoren met deze beoordeling geven gunstige tijdvensters aan. Standaard: 5%",
|
"price_rating_threshold_low": "Percentage onder het voortschrijdende 24-uurs gemiddelde dat de huidige prijs moet zijn om te kwalificeren als 'laag' beoordelingsniveau. Voorbeeld: -10 betekent minimaal 10% onder gemiddelde. Sensoren met deze beoordeling geven gunstige tijdvensters aan. Standaard: -10%",
|
||||||
"price_rating_threshold_high": "Percentage boven het voortschrijdende 24-uurs gemiddelde dat de huidige prijs moet zijn om te kwalificeren als 'hoog' beoordelingsniveau. Voorbeeld: 10 betekent minimaal 10% boven gemiddelde. Sensoren met deze beoordeling waarschuwen voor dure tijdvensters. Standaard: 10%"
|
"price_rating_threshold_high": "Percentage boven het voortschrijdende 24-uurs gemiddelde dat de huidige prijs moet zijn om te kwalificeren als 'hoog' beoordelingsniveau. Voorbeeld: 10 betekent minimaal 10% boven gemiddelde. Sensoren met deze beoordeling waarschuwen voor dure tijdvensters. Standaard: 10%",
|
||||||
|
"price_rating_hysteresis": "Percentageband rond drempelwaarden om snelle toestandswijzigingen te voorkomen. Wanneer de beoordeling al LAAG is, moet de prijs boven (drempel + hysterese) stijgen om naar NORMAAL te wisselen. Evenzo vereist HOOG dat de prijs onder (drempel - hysterese) daalt om de toestand te verlaten. Dit zorgt voor stabiliteit bij automatiseringen die reageren op beoordelingswijzigingen. Stel in op 0 om uit te schakelen. Standaard: 2%",
|
||||||
|
"price_rating_gap_tolerance": "Maximaal aantal opeenvolgende intervallen dat 'gladgestreken' kan worden als ze afwijken van omringende beoordelingen. Kleine geïsoleerde beoordelingswijzigingen worden samengevoegd met het dominante naburige blok. Dit zorgt voor stabiliteit bij automatiseringen door te voorkomen dat korte beoordelingspieken onnodige acties activeren. Voorbeeld: 1 betekent dat een enkel 'normaal'-interval omringd door 'hoog'-intervallen gecorrigeerd wordt naar 'hoog'. Stel in op 0 om uit te schakelen. Standaard: 1"
|
||||||
},
|
},
|
||||||
"submit": "↩ Opslaan & Terug"
|
"submit": "↩ Opslaan & Terug"
|
||||||
},
|
},
|
||||||
"best_price": {
|
"best_price": {
|
||||||
"title": "💚 Best Price Period Settings",
|
"title": "💚 Beste Prijs Periode Instellingen",
|
||||||
"description": "**Configure settings for the Best Price Period binary sensor. This sensor is active during periods with the lowest electricity prices.**\n\n---",
|
"description": "**Configureer instellingen voor de Beste Prijs Periode binaire sensor. Deze sensor is actief tijdens periodes met de laagste elektriciteitsprijzen.**{entity_warning}{override_warning}\n\n---",
|
||||||
"sections": {
|
"sections": {
|
||||||
"period_settings": {
|
"period_settings": {
|
||||||
"name": "Period Duration & Levels",
|
"name": "Periode Duur & Niveaus",
|
||||||
"description": "Configure how long periods should be and which price levels to include.",
|
"description": "Configureer hoe lang periodes moeten zijn en welke prijsniveaus moeten worden opgenomen.",
|
||||||
"data": {
|
"data": {
|
||||||
"best_price_min_period_length": "Minimum Period Length",
|
"best_price_min_period_length": "Minimale Periode Lengte",
|
||||||
"best_price_max_level": "Price Level Filter",
|
"best_price_max_level": "Prijsniveau Filter",
|
||||||
"best_price_max_level_gap_count": "Gap Tolerance"
|
"best_price_max_level_gap_count": "Gat Tolerantie"
|
||||||
},
|
},
|
||||||
"data_description": {
|
"data_description": {
|
||||||
"best_price_min_period_length": "Minimum duration for a period to be considered as 'best price'. Longer periods are more practical for running appliances like dishwashers or heat pumps. Best price periods require 60 minutes minimum (vs. 30 minutes for peak price warnings) because they should provide meaningful time windows for consumption planning, not just brief opportunities.",
|
"best_price_min_period_length": "Minimale duur voor een periode om als 'beste prijs' te worden beschouwd. Langere periodes zijn praktischer voor apparaten zoals vaatwassers of warmtepompen. Beste prijs periodes vereisen minimaal 60 minuten (versus 30 minuten voor piekprijs waarschuwingen) omdat ze betekenisvolle tijdvensters voor verbruiksplanning moeten bieden, niet alleen korte kansen.",
|
||||||
"best_price_max_level": "Only show best price periods if they contain intervals with price levels ≤ selected value. For example, selecting '**Cheap**' means the period must have at least one '**Very cheap**' or '**Cheap**' interval. This ensures 'best price' periods are not just relatively cheap for the day, but actually cheap in absolute terms. Select '**Any**' to show best prices regardless of their absolute price level.",
|
"best_price_max_level": "Toon alleen beste prijs periodes als ze intervallen bevatten met prijsniveaus ≤ geselecteerde waarde. Bijvoorbeeld, bij selectie '**Goedkoop**' moet de periode minimaal één '**Zeer goedkoop**' of '**Goedkoop**' interval hebben. Dit zorgt ervoor dat 'beste prijs' periodes niet alleen relatief goedkoop zijn voor de dag, maar daadwerkelijk goedkoop in absolute termen. Selecteer '**Alles**' om beste prijzen te tonen ongeacht hun absolute prijsniveau.",
|
||||||
"best_price_max_level_gap_count": "Maximaal aantal opeenvolgende intervallen toegestaan die precies één niveaustap afwijken van het vereiste niveau. Bijvoorbeeld: met '**Goedkoop**' filter en gat telling 1, wordt een reeks '**Goedkoop**, **Goedkoop**, **Normaal**, **Goedkoop**' geaccepteerd (**Normaal** is één stap boven **Goedkoop**). Dit voorkomt dat periodes worden gesplitst door incidentele niveauafwijkingen. **Let op:** Gat tolerantie vereist periodes ≥90 minuten (6 intervallen) om uitschieters effectief te detecteren. Standaard: 0 (strikte filtering, geen tolerantie)."
|
"best_price_max_level_gap_count": "Maximaal aantal opeenvolgende intervallen toegestaan die precies één niveaustap afwijken van het vereiste niveau. Bijvoorbeeld: met '**Goedkoop**' filter en gat telling 1, wordt een reeks '**Goedkoop**, **Goedkoop**, **Normaal**, **Goedkoop**' geaccepteerd (**Normaal** is één stap boven **Goedkoop**). Dit voorkomt dat periodes worden gesplitst door incidentele niveauafwijkingen. **Let op:** Gat tolerantie vereist periodes ≥90 minuten (6 intervallen) om uitschieters effectief te detecteren. Standaard: 0 (strikte filtering, geen tolerantie)."
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|
@ -231,7 +252,7 @@
|
||||||
},
|
},
|
||||||
"peak_price": {
|
"peak_price": {
|
||||||
"title": "🔴 Piekprijs Periode Instellingen",
|
"title": "🔴 Piekprijs Periode Instellingen",
|
||||||
"description": "**Configureer instellingen voor de Piekprijs Periode binaire sensor. Deze sensor is actief tijdens periodes met de hoogste elektriciteitsprijzen.**\n\n---",
|
"description": "**Configureer instellingen voor de Piekprijs Periode binaire sensor. Deze sensor is actief tijdens periodes met de hoogste elektriciteitsprijzen.**{entity_warning}{override_warning}\n\n---",
|
||||||
"sections": {
|
"sections": {
|
||||||
"period_settings": {
|
"period_settings": {
|
||||||
"name": "Periode Instellingen",
|
"name": "Periode Instellingen",
|
||||||
|
|
@ -278,20 +299,24 @@
|
||||||
},
|
},
|
||||||
"price_trend": {
|
"price_trend": {
|
||||||
"title": "📈 Prijstrend Drempelwaarden",
|
"title": "📈 Prijstrend Drempelwaarden",
|
||||||
"description": "**Configureer drempelwaarden voor prijstrend sensoren. Deze sensoren vergelijken de huidige prijs met het gemiddelde van de volgende N uur om te bepalen of prijzen stijgen, dalen of stabiel zijn.**",
|
"description": "**Configureer drempelwaarden voor prijstrend sensoren. Deze sensoren vergelijken de huidige prijs met het gemiddelde van de volgende N uur om te bepalen of prijzen sterk stijgen, stijgen, stabiel zijn, dalen of sterk dalen.**{entity_warning}",
|
||||||
"data": {
|
"data": {
|
||||||
"price_trend_threshold_rising": "Stijgende Drempel",
|
"price_trend_threshold_rising": "Stijgende Drempel",
|
||||||
"price_trend_threshold_falling": "Dalende Drempel"
|
"price_trend_threshold_strongly_rising": "Sterk Stijgende Drempel",
|
||||||
|
"price_trend_threshold_falling": "Dalende Drempel",
|
||||||
|
"price_trend_threshold_strongly_falling": "Sterk Dalende Drempel"
|
||||||
},
|
},
|
||||||
"data_description": {
|
"data_description": {
|
||||||
"price_trend_threshold_rising": "Percentage dat het gemiddelde van de volgende N uur boven de huidige prijs moet zijn om te kwalificeren als 'stijgende' trend. Voorbeeld: 5 betekent dat het gemiddelde minimaal 5% hoger is → prijzen zullen stijgen. Typische waarden: 5-15%. Standaard: 5%",
|
"price_trend_threshold_rising": "Percentage dat het gemiddelde van de volgende N uur boven de huidige prijs moet zijn om te kwalificeren als 'stijgende' trend. Voorbeeld: 3 betekent dat het gemiddelde minimaal 3% hoger is → prijzen zullen stijgen. Typische waarden: 3-10%. Standaard: 3%",
|
||||||
"price_trend_threshold_falling": "Percentage (negatief) dat het gemiddelde van de volgende N uur onder de huidige prijs moet zijn om te kwalificeren als 'dalende' trend. Voorbeeld: -5 betekent dat het gemiddelde minimaal 5% lager is → prijzen zullen dalen. Typische waarden: -5 tot -15%. Standaard: -5%"
|
"price_trend_threshold_strongly_rising": "Percentage dat het gemiddelde van de volgende N uur boven de huidige prijs moet zijn om te kwalificeren als 'sterk stijgende' trend. Moet hoger zijn dan stijgende drempel. Typische waarden: 6-20%. Standaard: 6%",
|
||||||
|
"price_trend_threshold_falling": "Percentage (negatief) dat het gemiddelde van de volgende N uur onder de huidige prijs moet zijn om te kwalificeren als 'dalende' trend. Voorbeeld: -3 betekent dat het gemiddelde minimaal 3% lager is → prijzen zullen dalen. Typische waarden: -3 tot -10%. Standaard: -3%",
|
||||||
|
"price_trend_threshold_strongly_falling": "Percentage (negatief) dat het gemiddelde van de volgende N uur onder de huidige prijs moet zijn om te kwalificeren als 'sterk dalende' trend. Moet lager (meer negatief) zijn dan dalende drempel. Typische waarden: -6 tot -20%. Standaard: -6%"
|
||||||
},
|
},
|
||||||
"submit": "↩ Opslaan & Terug"
|
"submit": "↩ Opslaan & Terug"
|
||||||
},
|
},
|
||||||
"volatility": {
|
"volatility": {
|
||||||
"title": "💨 Prijsvolatiliteit Drempelwaarden",
|
"title": "💨 Prijsvolatiliteit Drempelwaarden",
|
||||||
"description": "**Configureer drempelwaarden voor volatiliteitsclassificatie.** Volatiliteit meet relatieve prijsvariatie met de variëfficcïnt (CV = standaarddeviatie / gemiddelde × 100%). Deze drempelwaarden zijn percentagewaarden die werken over alle prijsniveaus.\n\nGebruikt door:\n• Volatiliteit sensoren (classificatie)\n• Trend sensoren (adaptieve drempelaanpassing: <gematigd = gevoeliger, ≥hoog = minder gevoelig)",
|
"description": "**Configureer drempelwaarden voor volatiliteitsclassificatie.** Volatiliteit meet relatieve prijsvariatie met de variëfficcïnt (CV = standaarddeviatie / gemiddelde × 100%). Deze drempelwaarden zijn percentagewaarden die werken over alle prijsniveaus.\n\nGebruikt door:\n• Volatiliteit sensoren (classificatie)\n• Trend sensoren (adaptieve drempelaanpassing: <gematigd = gevoeliger, ≥hoog = minder gevoelig){entity_warning}",
|
||||||
"data": {
|
"data": {
|
||||||
"volatility_threshold_moderate": "Gematigde Drempel",
|
"volatility_threshold_moderate": "Gematigde Drempel",
|
||||||
"volatility_threshold_high": "Hoge Drempel",
|
"volatility_threshold_high": "Hoge Drempel",
|
||||||
|
|
@ -306,7 +331,7 @@
|
||||||
},
|
},
|
||||||
"chart_data_export": {
|
"chart_data_export": {
|
||||||
"title": "📊 Grafiekdata Export Sensor",
|
"title": "📊 Grafiekdata Export Sensor",
|
||||||
"description": "De Grafiekdata Export Sensor biedt prijsgegevens als sensor attributen.\n\n⚠️ **Let op:** Deze sensor is een legacy functie voor compatibiliteit met oudere tools.\n\n**Aanbevolen voor nieuwe setups:** Gebruik de `tibber_prices.get_chartdata` **service direct** - het is flexibeler, efficïnter, en de moderne Home Assistant aanpak.\n\n**Wanneer deze sensor zinvol is:**\n\n✅ Je dashboardtool kan **alleen** attributen lezen (geen service calls)\n✅ Je hebt statische data nodig die automatisch update\n❌ **Niet voor automatiseringen:** Gebruik `tibber_prices.get_chartdata` daar direct - flexibeler en efficïnter!\n\n---\n\n**De sensor inschakelen:**\n\n1. Open **Instellingen → Apparaten & Services → Tibber Prices**\n2. Selecteer je huis → Vind **'Chart Data Export'** (Diagnose sectie)\n3. **Schakel de sensor in** (standaard uitgeschakeld)\n\n**Configuratie (optioneel):**\n\nStandaard instellingen werken out-of-the-box (vandaag+morgen, 15-minuten intervallen, alleen prijzen).\n\nVoor aanpassing, voeg toe aan **`configuration.yaml`**:\n\n```yaml\ntibber_prices:\n chart_export:\n day:\n - today\n - tomorrow\n include_level: true\n include_rating_level: true\n```\n\n**Alle parameters:** Zie `tibber_prices.get_chartdata` service documentatie",
|
"description": "De Grafiekdata Export Sensor biedt prijsgegevens als sensor attributen.\n\n⚠️ **Let op:** Deze sensor is een legacy functie voor compatibiliteit met oudere tools.\n\n**Aanbevolen voor nieuwe setups:** Gebruik de `tibber_prices.get_chartdata` **service direct** - het is flexibeler, efficïnter, en de moderne Home Assistant aanpak.\n\n**Wanneer deze sensor zinvol is:**\n\n✅ Je dashboardtool kan **alleen** attributen lezen (geen service calls)\n✅ Je hebt statische data nodig die automatisch update\n❌ **Niet voor automatiseringen:** Gebruik `tibber_prices.get_chartdata` daar direct - flexibeler en efficïnter!\n\n---\n\n{sensor_status_info}",
|
||||||
"submit": "↩ Ok & Terug"
|
"submit": "↩ Ok & Terug"
|
||||||
},
|
},
|
||||||
"reset_to_defaults": {
|
"reset_to_defaults": {
|
||||||
|
|
@ -316,6 +341,17 @@
|
||||||
"confirm_reset": "Ja, reset alles naar standaardwaarden"
|
"confirm_reset": "Ja, reset alles naar standaardwaarden"
|
||||||
},
|
},
|
||||||
"submit": "Nu Resetten"
|
"submit": "Nu Resetten"
|
||||||
|
},
|
||||||
|
"price_level": {
|
||||||
|
"title": "🏷️ Prijsniveau-instellingen",
|
||||||
|
"description": "**Configureer stabilisatie voor Tibbers prijsniveau-classificatie (zeer goedkoop/goedkoop/normaal/duur/zeer duur).**\n\nTibbers API levert een prijsniveau-veld voor elk interval. Deze instelling egaliseer korte fluctuaties om instabiliteit in automatiseringen te voorkomen.{entity_warning}",
|
||||||
|
"data": {
|
||||||
|
"price_level_gap_tolerance": "Gap-tolerantie"
|
||||||
|
},
|
||||||
|
"data_description": {
|
||||||
|
"price_level_gap_tolerance": "Maximaal aantal opeenvolgende intervallen dat 'afgevlakt' kan worden als ze afwijken van omringende prijsniveaus. Kleine geïsoleerde niveauwijzigingen worden samengevoegd met het dominante aangrenzende blok. Voorbeeld: 1 betekent dat een enkel 'normaal'-interval omringd door 'goedkoop'-intervallen wordt gecorrigeerd naar 'goedkoop'. Stel in op 0 om uit te schakelen. Standaard: 1"
|
||||||
|
},
|
||||||
|
"submit": "↩ Opslaan & terug"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"error": {
|
"error": {
|
||||||
|
|
@ -340,7 +376,11 @@
|
||||||
"invalid_volatility_threshold_very_high": "Zeer hoge volatiliteit drempel moet tussen 35% en 80% zijn",
|
"invalid_volatility_threshold_very_high": "Zeer hoge volatiliteit drempel moet tussen 35% en 80% zijn",
|
||||||
"invalid_volatility_thresholds": "Drempelwaarden moeten in oplopende volgorde zijn: gematigd < hoog < zeer hoog",
|
"invalid_volatility_thresholds": "Drempelwaarden moeten in oplopende volgorde zijn: gematigd < hoog < zeer hoog",
|
||||||
"invalid_price_trend_rising": "Stijgende trend drempel moet tussen 1% en 50% zijn",
|
"invalid_price_trend_rising": "Stijgende trend drempel moet tussen 1% en 50% zijn",
|
||||||
"invalid_price_trend_falling": "Dalende trend drempel moet tussen -50% en -1% zijn"
|
"invalid_price_trend_falling": "Dalende trend drempel moet tussen -50% en -1% zijn",
|
||||||
|
"invalid_price_trend_strongly_rising": "Sterk stijgende trend drempel moet tussen 2% en 100% zijn",
|
||||||
|
"invalid_price_trend_strongly_falling": "Sterk dalende trend drempel moet tussen -100% en -2% zijn",
|
||||||
|
"invalid_trend_strongly_rising_less_than_rising": "Sterk stijgende drempel moet hoger zijn dan stijgende drempel",
|
||||||
|
"invalid_trend_strongly_falling_greater_than_falling": "Sterk dalende drempel moet lager (meer negatief) zijn dan dalende drempel"
|
||||||
},
|
},
|
||||||
"abort": {
|
"abort": {
|
||||||
"entry_not_found": "Tibber-configuratie-item niet gevonden.",
|
"entry_not_found": "Tibber-configuratie-item niet gevonden.",
|
||||||
|
|
@ -576,73 +616,91 @@
|
||||||
"price_trend_1h": {
|
"price_trend_1h": {
|
||||||
"name": "Prijstrend (1u)",
|
"name": "Prijstrend (1u)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Sterk stijgend",
|
||||||
"rising": "Stijgend",
|
"rising": "Stijgend",
|
||||||
|
"stable": "Stabiel",
|
||||||
"falling": "Dalend",
|
"falling": "Dalend",
|
||||||
"stable": "Stabiel"
|
"strongly_falling": "Sterk dalend"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_2h": {
|
"price_trend_2h": {
|
||||||
"name": "Prijstrend (2u)",
|
"name": "Prijstrend (2u)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Sterk stijgend",
|
||||||
"rising": "Stijgend",
|
"rising": "Stijgend",
|
||||||
|
"stable": "Stabiel",
|
||||||
"falling": "Dalend",
|
"falling": "Dalend",
|
||||||
"stable": "Stabiel"
|
"strongly_falling": "Sterk dalend"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_3h": {
|
"price_trend_3h": {
|
||||||
"name": "Prijstrend (3u)",
|
"name": "Prijstrend (3u)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Sterk stijgend",
|
||||||
"rising": "Stijgend",
|
"rising": "Stijgend",
|
||||||
|
"stable": "Stabiel",
|
||||||
"falling": "Dalend",
|
"falling": "Dalend",
|
||||||
"stable": "Stabiel"
|
"strongly_falling": "Sterk dalend"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_4h": {
|
"price_trend_4h": {
|
||||||
"name": "Prijstrend (4u)",
|
"name": "Prijstrend (4u)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Sterk stijgend",
|
||||||
"rising": "Stijgend",
|
"rising": "Stijgend",
|
||||||
|
"stable": "Stabiel",
|
||||||
"falling": "Dalend",
|
"falling": "Dalend",
|
||||||
"stable": "Stabiel"
|
"strongly_falling": "Sterk dalend"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_5h": {
|
"price_trend_5h": {
|
||||||
"name": "Prijstrend (5u)",
|
"name": "Prijstrend (5u)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Sterk stijgend",
|
||||||
"rising": "Stijgend",
|
"rising": "Stijgend",
|
||||||
|
"stable": "Stabiel",
|
||||||
"falling": "Dalend",
|
"falling": "Dalend",
|
||||||
"stable": "Stabiel"
|
"strongly_falling": "Sterk dalend"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_6h": {
|
"price_trend_6h": {
|
||||||
"name": "Prijstrend (6u)",
|
"name": "Prijstrend (6u)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Sterk stijgend",
|
||||||
"rising": "Stijgend",
|
"rising": "Stijgend",
|
||||||
|
"stable": "Stabiel",
|
||||||
"falling": "Dalend",
|
"falling": "Dalend",
|
||||||
"stable": "Stabiel"
|
"strongly_falling": "Sterk dalend"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_8h": {
|
"price_trend_8h": {
|
||||||
"name": "Prijstrend (8u)",
|
"name": "Prijstrend (8u)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Sterk stijgend",
|
||||||
"rising": "Stijgend",
|
"rising": "Stijgend",
|
||||||
|
"stable": "Stabiel",
|
||||||
"falling": "Dalend",
|
"falling": "Dalend",
|
||||||
"stable": "Stabiel"
|
"strongly_falling": "Sterk dalend"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_12h": {
|
"price_trend_12h": {
|
||||||
"name": "Prijstrend (12u)",
|
"name": "Prijstrend (12u)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Sterk stijgend",
|
||||||
"rising": "Stijgend",
|
"rising": "Stijgend",
|
||||||
|
"stable": "Stabiel",
|
||||||
"falling": "Dalend",
|
"falling": "Dalend",
|
||||||
"stable": "Stabiel"
|
"strongly_falling": "Sterk dalend"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"current_price_trend": {
|
"current_price_trend": {
|
||||||
"name": "Huidige Prijstrend",
|
"name": "Huidige Prijstrend",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Sterk stijgend",
|
||||||
"rising": "Stijgend",
|
"rising": "Stijgend",
|
||||||
|
"stable": "Stabiel",
|
||||||
"falling": "Dalend",
|
"falling": "Dalend",
|
||||||
"stable": "Stabiel"
|
"strongly_falling": "Sterk dalend"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"next_price_trend_change": {
|
"next_price_trend_change": {
|
||||||
|
|
@ -844,6 +902,52 @@
|
||||||
"realtime_consumption_enabled": {
|
"realtime_consumption_enabled": {
|
||||||
"name": "Realtime Verbruik Ingeschakeld"
|
"name": "Realtime Verbruik Ingeschakeld"
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
"number": {
|
||||||
|
"best_price_flex_override": {
|
||||||
|
"name": "Beste prijs: Flexibiliteit"
|
||||||
|
},
|
||||||
|
"best_price_min_distance_override": {
|
||||||
|
"name": "Beste prijs: Minimale afstand"
|
||||||
|
},
|
||||||
|
"best_price_min_period_length_override": {
|
||||||
|
"name": "Beste prijs: Minimale periodelengte"
|
||||||
|
},
|
||||||
|
"best_price_min_periods_override": {
|
||||||
|
"name": "Beste prijs: Minimum periodes"
|
||||||
|
},
|
||||||
|
"best_price_relaxation_attempts_override": {
|
||||||
|
"name": "Beste prijs: Versoepeling pogingen"
|
||||||
|
},
|
||||||
|
"best_price_gap_count_override": {
|
||||||
|
"name": "Beste prijs: Gap tolerantie"
|
||||||
|
},
|
||||||
|
"peak_price_flex_override": {
|
||||||
|
"name": "Piekprijs: Flexibiliteit"
|
||||||
|
},
|
||||||
|
"peak_price_min_distance_override": {
|
||||||
|
"name": "Piekprijs: Minimale afstand"
|
||||||
|
},
|
||||||
|
"peak_price_min_period_length_override": {
|
||||||
|
"name": "Piekprijs: Minimale periodelengte"
|
||||||
|
},
|
||||||
|
"peak_price_min_periods_override": {
|
||||||
|
"name": "Piekprijs: Minimum periodes"
|
||||||
|
},
|
||||||
|
"peak_price_relaxation_attempts_override": {
|
||||||
|
"name": "Piekprijs: Versoepeling pogingen"
|
||||||
|
},
|
||||||
|
"peak_price_gap_count_override": {
|
||||||
|
"name": "Piekprijs: Gap tolerantie"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"switch": {
|
||||||
|
"best_price_enable_relaxation_override": {
|
||||||
|
"name": "Beste prijs: Minimum aantal bereiken"
|
||||||
|
},
|
||||||
|
"peak_price_enable_relaxation_override": {
|
||||||
|
"name": "Piekprijs: Minimum aantal bereiken"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"issues": {
|
"issues": {
|
||||||
|
|
@ -906,6 +1010,14 @@
|
||||||
"highlight_best_price": {
|
"highlight_best_price": {
|
||||||
"name": "Beste prijsperiodes markeren",
|
"name": "Beste prijsperiodes markeren",
|
||||||
"description": "Voeg een halfdo0rzichtige groene overlay toe om de beste prijsperiodes in de grafiek te markeren. Dit maakt het gemakkelijk om visueel de optimale tijden voor energieverbruik te identificeren."
|
"description": "Voeg een halfdo0rzichtige groene overlay toe om de beste prijsperiodes in de grafiek te markeren. Dit maakt het gemakkelijk om visueel de optimale tijden voor energieverbruik te identificeren."
|
||||||
|
},
|
||||||
|
"highlight_peak_price": {
|
||||||
|
"name": "Piekprijsperiodes markeren",
|
||||||
|
"description": "Voeg een halfdoorzichtige rode overlay toe om de piekprijsperiodes in de grafiek te markeren. Dit maakt het gemakkelijk om visueel de tijden te identificeren wanneer energie het duurst is."
|
||||||
|
},
|
||||||
|
"resolution": {
|
||||||
|
"name": "Resolutie",
|
||||||
|
"description": "Tijdresolutie voor de grafiekdata. 'interval' (standaard): Originele 15-minutenintervallen (96 punten per dag). 'hourly': Geaggregeerde uurwaarden met een rollend 60-minutenvenster (24 punten per dag) voor een overzichtelijkere grafiek."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|
|
||||||
|
|
@ -11,14 +11,14 @@
|
||||||
},
|
},
|
||||||
"new_token": {
|
"new_token": {
|
||||||
"title": "Ange API-token",
|
"title": "Ange API-token",
|
||||||
"description": "Konfigurera Tibber Prisinformation & Betyg.\n\nFör att generera en API-åtkomsttoken, besök https://developer.tibber.com.",
|
"description": "Konfigurera Tibber Prisinformation & Betyg.\n\nFör att generera en API-åtkomsttoken, besök [{tibber_url}]({tibber_url}).",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API-åtkomsttoken"
|
"access_token": "API-åtkomsttoken"
|
||||||
},
|
},
|
||||||
"submit": "Validera token"
|
"submit": "Validera token"
|
||||||
},
|
},
|
||||||
"user": {
|
"user": {
|
||||||
"description": "Konfigurera Tibber Prisinformation & Betyg.\n\nFör att generera en API-åtkomsttoken, besök https://developer.tibber.com.",
|
"description": "Konfigurera Tibber Prisinformation & Betyg.\n\nFör att generera en API-åtkomsttoken, besök [{tibber_url}]({tibber_url}).",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API-åtkomsttoken"
|
"access_token": "API-åtkomsttoken"
|
||||||
},
|
},
|
||||||
|
|
@ -42,7 +42,7 @@
|
||||||
},
|
},
|
||||||
"reauth_confirm": {
|
"reauth_confirm": {
|
||||||
"title": "Återautentisera Tibber-prisintegration",
|
"title": "Återautentisera Tibber-prisintegration",
|
||||||
"description": "Åtkomsttoken för Tibber är inte längre giltig. Ange en ny API-åtkomsttoken för att fortsätta använda denna integration.\n\nFör att generera en ny API-åtkomsttoken, besök https://developer.tibber.com.",
|
"description": "Åtkomsttoken för Tibber är inte längre giltig. Ange en ny API-åtkomsttoken för att fortsätta använda denna integration.\n\nFör att generera en ny API-åtkomsttoken, besök [{tibber_url}]({tibber_url}).",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API-åtkomsttoken"
|
"access_token": "API-åtkomsttoken"
|
||||||
},
|
},
|
||||||
|
|
@ -77,7 +77,23 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"common": {
|
"common": {
|
||||||
"step_progress": "{step_num} / {total_steps}"
|
"step_progress": "{step_num} / {total_steps}",
|
||||||
|
"override_warning_template": "⚠️ {fields} styrs av konfigurationsentitet",
|
||||||
|
"override_warning_and": "och",
|
||||||
|
"override_field_label_best_price_min_period_length": "Minsta periodlängd",
|
||||||
|
"override_field_label_best_price_max_level_gap_count": "Glappstolerans",
|
||||||
|
"override_field_label_best_price_flex": "Flexibilitet",
|
||||||
|
"override_field_label_best_price_min_distance_from_avg": "Minsta avstånd",
|
||||||
|
"override_field_label_enable_min_periods_best": "Uppnå minsta antal",
|
||||||
|
"override_field_label_min_periods_best": "Minimiperioder",
|
||||||
|
"override_field_label_relaxation_attempts_best": "Avslappningsförsök",
|
||||||
|
"override_field_label_peak_price_min_period_length": "Minsta periodlängd",
|
||||||
|
"override_field_label_peak_price_max_level_gap_count": "Glappstolerans",
|
||||||
|
"override_field_label_peak_price_flex": "Flexibilitet",
|
||||||
|
"override_field_label_peak_price_min_distance_from_avg": "Minsta avstånd",
|
||||||
|
"override_field_label_enable_min_periods_peak": "Uppnå minsta antal",
|
||||||
|
"override_field_label_min_periods_peak": "Minimiperioder",
|
||||||
|
"override_field_label_relaxation_attempts_peak": "Avslappningsförsök"
|
||||||
},
|
},
|
||||||
"config_subentries": {
|
"config_subentries": {
|
||||||
"home": {
|
"home": {
|
||||||
|
|
@ -136,6 +152,7 @@
|
||||||
"general_settings": "⚙️ Allmänna inställningar",
|
"general_settings": "⚙️ Allmänna inställningar",
|
||||||
"display_settings": "💱 Valutavisning",
|
"display_settings": "💱 Valutavisning",
|
||||||
"current_interval_price_rating": "📊 Prisbetyg",
|
"current_interval_price_rating": "📊 Prisbetyg",
|
||||||
|
"price_level": "🏷️ Prisnivå",
|
||||||
"volatility": "💨 Prisvolatilitet",
|
"volatility": "💨 Prisvolatilitet",
|
||||||
"best_price": "💚 Bästa Prisperiod",
|
"best_price": "💚 Bästa Prisperiod",
|
||||||
"peak_price": "🔴 Topprisperiod",
|
"peak_price": "🔴 Topprisperiod",
|
||||||
|
|
@ -170,21 +187,25 @@
|
||||||
"submit": "↩ Spara & tillbaka"
|
"submit": "↩ Spara & tillbaka"
|
||||||
},
|
},
|
||||||
"current_interval_price_rating": {
|
"current_interval_price_rating": {
|
||||||
"title": "📊 Prisbetygströsklar",
|
"title": "📊 Prisbetyginställningar",
|
||||||
"description": "**Konfigurera tröskelvärden för prisbetygsnivåer (låg/normal/hög) baserat på jämförelse med glidande 24-timmars genomsnitt.**",
|
"description": "**Konfigurera tröskelvärden och stabilisering för prisbetygsnivåer (låg/normal/hög) baserat på jämförelse med glidande 24-timmars genomsnitt.**{entity_warning}",
|
||||||
"data": {
|
"data": {
|
||||||
"price_rating_threshold_low": "Låg tröskel",
|
"price_rating_threshold_low": "Låg tröskel",
|
||||||
"price_rating_threshold_high": "Hög tröskel"
|
"price_rating_threshold_high": "Hög tröskel",
|
||||||
|
"price_rating_hysteresis": "Hysteres",
|
||||||
|
"price_rating_gap_tolerance": "Gap-tolerans"
|
||||||
},
|
},
|
||||||
"data_description": {
|
"data_description": {
|
||||||
"price_rating_threshold_low": "Procentandel under det glidande 24-timmars genomsnittet som det aktuella priset måste vara för att kvalificera som 'lågt' betyg. Exempel: 5 betyder minst 5% under genomsnittet. Sensorer med detta betyg indikerar gynnsamma tidsfönster. Standard: 5%",
|
"price_rating_threshold_low": "Procentandel under det glidande 24-timmars genomsnittet som det aktuella priset måste vara för att kvalificera som 'lågt' betyg. Exempel: -10 betyder minst 10% under genomsnittet. Sensorer med detta betyg indikerar gynnsamma tidsfönster. Standard: -10%",
|
||||||
"price_rating_threshold_high": "Procentandel över det glidande 24-timmars genomsnittet som det aktuella priset måste vara för att kvalificera som 'högt' betyg. Exempel: 10 betyder minst 10% över genomsnittet. Sensorer med detta betyg varnar om dyra tidsfönster. Standard: 10%"
|
"price_rating_threshold_high": "Procentandel över det glidande 24-timmars genomsnittet som det aktuella priset måste vara för att kvalificera som 'högt' betyg. Exempel: 10 betyder minst 10% över genomsnittet. Sensorer med detta betyg varnar om dyra tidsfönster. Standard: 10%",
|
||||||
|
"price_rating_hysteresis": "Procentband runt tröskelvärden för att undvika snabba tillståndsändringar. När betyget redan är LÅGT måste priset stiga över (tröskel + hysteres) för att byta till NORMAL. Likaså kräver HÖGT att priset faller under (tröskel - hysteres) för att lämna tillståndet. Detta ger stabilitet för automatiseringar som reagerar på betygsändringar. Sätt till 0 för att inaktivera. Standard: 2%",
|
||||||
|
"price_rating_gap_tolerance": "Maximalt antal på varandra följande intervaller som kan 'jämnas ut' om de avviker från omgivande betyg. Små isolerade betygsändringar sammanfogas med det dominerande grannblocket. Detta ger stabilitet för automatiseringar genom att förhindra att korta betygstoppar utlöser onödiga åtgärder. Exempel: 1 betyder att ett enstaka 'normal'-intervall omgivet av 'hög'-intervaller korrigeras till 'hög'. Sätt till 0 för att inaktivera. Standard: 1"
|
||||||
},
|
},
|
||||||
"submit": "↩ Spara & tillbaka"
|
"submit": "↩ Spara & tillbaka"
|
||||||
},
|
},
|
||||||
"best_price": {
|
"best_price": {
|
||||||
"title": "💚 Bästa Prisperiod-inställningar",
|
"title": "💚 Bästa Prisperiod-inställningar",
|
||||||
"description": "**Konfigurera inställningar för binärsensorn Bästa Prisperiod. Denna sensor är aktiv under perioder med lägsta elpriserna.**\n\n---",
|
"description": "**Konfigurera inställningar för binärsensorn Bästa Prisperiod. Denna sensor är aktiv under perioder med lägsta elpriserna.**{entity_warning}{override_warning}\n\n---",
|
||||||
"sections": {
|
"sections": {
|
||||||
"period_settings": {
|
"period_settings": {
|
||||||
"name": "Periodlängd & Nivåer",
|
"name": "Periodlängd & Nivåer",
|
||||||
|
|
@ -231,7 +252,7 @@
|
||||||
},
|
},
|
||||||
"peak_price": {
|
"peak_price": {
|
||||||
"title": "🔴 Topprisperiod-inställningar",
|
"title": "🔴 Topprisperiod-inställningar",
|
||||||
"description": "**Konfigurera inställningar för binärsensorn Topprisperiod. Denna sensor är aktiv under perioder med högsta elpriserna.**\n\n---",
|
"description": "**Konfigurera inställningar för binärsensorn Topprisperiod. Denna sensor är aktiv under perioder med högsta elpriserna.**{entity_warning}{override_warning}\n\n---",
|
||||||
"sections": {
|
"sections": {
|
||||||
"period_settings": {
|
"period_settings": {
|
||||||
"name": "Periodinställningar",
|
"name": "Periodinställningar",
|
||||||
|
|
@ -278,20 +299,24 @@
|
||||||
},
|
},
|
||||||
"price_trend": {
|
"price_trend": {
|
||||||
"title": "📈 Pristrendtrösklar",
|
"title": "📈 Pristrendtrösklar",
|
||||||
"description": "**Konfigurera tröskelvärden för pristrendsensorer. Dessa sensorer jämför aktuellt pris med genomsnittet av de nästa N timmarna för att bestämma om priserna stiger, faller eller är stabila.**",
|
"description": "**Konfigurera tröskelvärden för pristrendsensorer. Dessa sensorer jämför aktuellt pris med genomsnittet av de nästa N timmarna för att bestämma om priserna stiger kraftigt, stiger, är stabila, faller eller faller kraftigt.**{entity_warning}",
|
||||||
"data": {
|
"data": {
|
||||||
"price_trend_threshold_rising": "Stigande tröskel",
|
"price_trend_threshold_rising": "Stigande tröskel",
|
||||||
"price_trend_threshold_falling": "Fallande tröskel"
|
"price_trend_threshold_strongly_rising": "Kraftigt stigande tröskel",
|
||||||
|
"price_trend_threshold_falling": "Fallande tröskel",
|
||||||
|
"price_trend_threshold_strongly_falling": "Kraftigt fallande tröskel"
|
||||||
},
|
},
|
||||||
"data_description": {
|
"data_description": {
|
||||||
"price_trend_threshold_rising": "Procentandel som genomsnittet av de nästa N timmarna måste vara över det aktuella priset för att kvalificera som 'stigande' trend. Exempel: 5 betyder att genomsnittet är minst 5% högre → priserna kommer att stiga. Typiska värden: 5-15%. Standard: 5%",
|
"price_trend_threshold_rising": "Procentandel som genomsnittet av de nästa N timmarna måste vara över det aktuella priset för att kvalificera som 'stigande' trend. Exempel: 3 betyder att genomsnittet är minst 3% högre → priserna kommer att stiga. Typiska värden: 3-10%. Standard: 3%",
|
||||||
"price_trend_threshold_falling": "Procentandel (negativ) som genomsnittet av de nästa N timmarna måste vara under det aktuella priset för att kvalificera som 'fallande' trend. Exempel: -5 betyder att genomsnittet är minst 5% lägre → priserna kommer att falla. Typiska värden: -5 till -15%. Standard: -5%"
|
"price_trend_threshold_strongly_rising": "Procentandel som genomsnittet av de nästa N timmarna måste vara över det aktuella priset för att kvalificera som 'kraftigt stigande' trend. Måste vara högre än stigande tröskel. Typiska värden: 6-20%. Standard: 6%",
|
||||||
|
"price_trend_threshold_falling": "Procentandel (negativ) som genomsnittet av de nästa N timmarna måste vara under det aktuella priset för att kvalificera som 'fallande' trend. Exempel: -3 betyder att genomsnittet är minst 3% lägre → priserna kommer att falla. Typiska värden: -3 till -10%. Standard: -3%",
|
||||||
|
"price_trend_threshold_strongly_falling": "Procentandel (negativ) som genomsnittet av de nästa N timmarna måste vara under det aktuella priset för att kvalificera som 'kraftigt fallande' trend. Måste vara lägre (mer negativ) än fallande tröskel. Typiska värden: -6 till -20%. Standard: -6%"
|
||||||
},
|
},
|
||||||
"submit": "↩ Spara & tillbaka"
|
"submit": "↩ Spara & tillbaka"
|
||||||
},
|
},
|
||||||
"volatility": {
|
"volatility": {
|
||||||
"title": "💨 Prisvolatilitetströsklar",
|
"title": "💨 Prisvolatilitetströsklar",
|
||||||
"description": "**Konfigurera tröskelvärden för volatilitetsklassificering.** Volatilitet mäter relativ prisvariation med variationskoefficienten (CV = standardavvikelse / medelvärde × 100%). Dessa tröskelvärden är procentvärden som fungerar över alla prisnivåer.\n\nAnvänds av:\n• Volatilitetssensorer (klassificering)\n• Trendsensorer (adaptiv tröskeljustering: <måttlig = mer känslig, ≥hög = mindre känslig)",
|
"description": "**Konfigurera tröskelvärden för volatilitetsklassificering.** Volatilitet mäter relativ prisvariation med variationskoefficienten (CV = standardavvikelse / medelvärde × 100%). Dessa tröskelvärden är procentvärden som fungerar över alla prisnivåer.\n\nAnvänds av:\n• Volatilitetssensorer (klassificering)\n• Trendsensorer (adaptiv tröskeljustering: <måttlig = mer känslig, ≥hög = mindre känslig){entity_warning}",
|
||||||
"data": {
|
"data": {
|
||||||
"volatility_threshold_moderate": "Måttlig tröskel",
|
"volatility_threshold_moderate": "Måttlig tröskel",
|
||||||
"volatility_threshold_high": "Hög tröskel",
|
"volatility_threshold_high": "Hög tröskel",
|
||||||
|
|
@ -306,7 +331,7 @@
|
||||||
},
|
},
|
||||||
"chart_data_export": {
|
"chart_data_export": {
|
||||||
"title": "📊 Diagramdataexport-sensor",
|
"title": "📊 Diagramdataexport-sensor",
|
||||||
"description": "Diagramdataexport-sensorn tillhandahåller prisdata som sensorattribut.\n\n⚠️ **Obs:** Denna sensor är en äldre funktion för kompatibilitet med äldre verktyg.\n\n**Rekommenderat för nya konfigurationer:** Använd `tibber_prices.get_chartdata` **tjänsten direkt** - den är mer flexibel, effektiv och det moderna Home Assistant-sättet.\n\n**När denna sensor är meningsfull:**\n\n✅ Ditt instrumentpanelverktyg kan **endast** läsa attribut (inga tjänsteanrop)\n✅ Du behöver statisk data som uppdateras automatiskt\n❌ **Inte för automationer:** Använd `tibber_prices.get_chartdata` direkt där - mer flexibelt och effektivt!\n\n---\n\n**Aktivera sensorn:**\n\n1. Öppna **Inställningar → Enheter & Tjänster → Tibber-priser**\n2. Välj ditt hem → Hitta **'Diagramdataexport'** (Diagnostiksektion)\n3. **Aktivera sensorn** (inaktiverad som standard)\n\n**Konfiguration (valfritt):**\n\nStandardinställningar fungerar direkt (idag+imorgon, 15-minutersintervall, endast priser).\n\nFör anpassning, lägg till i **`configuration.yaml`**:\n\n```yaml\ntibber_prices:\n chart_export:\n day:\n - today\n - tomorrow\n include_level: true\n include_rating_level: true\n```\n\n**Alla parametrar:** Se `tibber_prices.get_chartdata` tjänstdokumentation",
|
"description": "Diagramdataexport-sensorn tillhandahåller prisdata som sensorattribut.\n\n⚠️ **Obs:** Denna sensor är en äldre funktion för kompatibilitet med äldre verktyg.\n\n**Rekommenderat för nya konfigurationer:** Använd `tibber_prices.get_chartdata` **tjänsten direkt** - den är mer flexibel, effektiv och det moderna Home Assistant-sättet.\n\n**När denna sensor är meningsfull:**\n\n✅ Ditt instrumentpanelverktyg kan **endast** läsa attribut (inga tjänsteanrop)\n✅ Du behöver statisk data som uppdateras automatiskt\n❌ **Inte för automationer:** Använd `tibber_prices.get_chartdata` direkt där - mer flexibelt och effektivt!\n\n---\n\n{sensor_status_info}",
|
||||||
"submit": "↩ Ok & tillbaka"
|
"submit": "↩ Ok & tillbaka"
|
||||||
},
|
},
|
||||||
"reset_to_defaults": {
|
"reset_to_defaults": {
|
||||||
|
|
@ -316,6 +341,17 @@
|
||||||
"confirm_reset": "Ja, återställ allt till standard"
|
"confirm_reset": "Ja, återställ allt till standard"
|
||||||
},
|
},
|
||||||
"submit": "Återställ nu"
|
"submit": "Återställ nu"
|
||||||
|
},
|
||||||
|
"price_level": {
|
||||||
|
"title": "🏷️ Prisnivå-inställningar",
|
||||||
|
"description": "**Konfigurera stabilisering för Tibbers prisnivå-klassificering (mycket billig/billig/normal/dyr/mycket dyr).**\n\nTibbers API tillhandahåller ett prisnivå-fält för varje intervall. Denna inställning jämnar ut korta fluktuationer för att förhindra instabilitet i automatiseringar.{entity_warning}",
|
||||||
|
"data": {
|
||||||
|
"price_level_gap_tolerance": "Gap-tolerans"
|
||||||
|
},
|
||||||
|
"data_description": {
|
||||||
|
"price_level_gap_tolerance": "Maximalt antal på varandra följande intervaller som kan 'jämnas ut' om de avviker från omgivande prisnivåer. Små isolerade nivåförändringar sammanfogas med det dominerande grannblocket. Exempel: 1 betyder att ett enstaka 'normal'-intervall omgivet av 'billig'-intervaller korrigeras till 'billig'. Sätt till 0 för att inaktivera. Standard: 1"
|
||||||
|
},
|
||||||
|
"submit": "↩ Spara & tillbaka"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"error": {
|
"error": {
|
||||||
|
|
@ -340,7 +376,11 @@
|
||||||
"invalid_volatility_threshold_very_high": "Mycket hög volatilitetströskel måste vara mellan 35% och 80%",
|
"invalid_volatility_threshold_very_high": "Mycket hög volatilitetströskel måste vara mellan 35% och 80%",
|
||||||
"invalid_volatility_thresholds": "Trösklar måste vara i stigande ordning: måttlig < hög < mycket hög",
|
"invalid_volatility_thresholds": "Trösklar måste vara i stigande ordning: måttlig < hög < mycket hög",
|
||||||
"invalid_price_trend_rising": "Stigande trendtröskel måste vara mellan 1% och 50%",
|
"invalid_price_trend_rising": "Stigande trendtröskel måste vara mellan 1% och 50%",
|
||||||
"invalid_price_trend_falling": "Fallande trendtröskel måste vara mellan -50% och -1%"
|
"invalid_price_trend_falling": "Fallande trendtröskel måste vara mellan -50% och -1%",
|
||||||
|
"invalid_price_trend_strongly_rising": "Kraftigt stigande trendtröskel måste vara mellan 2% och 100%",
|
||||||
|
"invalid_price_trend_strongly_falling": "Kraftigt fallande trendtröskel måste vara mellan -100% och -2%",
|
||||||
|
"invalid_trend_strongly_rising_less_than_rising": "Kraftigt stigande-tröskel måste vara högre än stigande-tröskel",
|
||||||
|
"invalid_trend_strongly_falling_greater_than_falling": "Kraftigt fallande-tröskel måste vara lägre (mer negativ) än fallande-tröskel"
|
||||||
},
|
},
|
||||||
"abort": {
|
"abort": {
|
||||||
"entry_not_found": "Tibber-konfigurationspost hittades inte.",
|
"entry_not_found": "Tibber-konfigurationspost hittades inte.",
|
||||||
|
|
@ -576,73 +616,91 @@
|
||||||
"price_trend_1h": {
|
"price_trend_1h": {
|
||||||
"name": "Pristrend (1h)",
|
"name": "Pristrend (1h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Kraftigt stigande",
|
||||||
"rising": "Stigande",
|
"rising": "Stigande",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallande",
|
"falling": "Fallande",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Kraftigt fallande"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_2h": {
|
"price_trend_2h": {
|
||||||
"name": "Pristrend (2h)",
|
"name": "Pristrend (2h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Kraftigt stigande",
|
||||||
"rising": "Stigande",
|
"rising": "Stigande",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallande",
|
"falling": "Fallande",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Kraftigt fallande"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_3h": {
|
"price_trend_3h": {
|
||||||
"name": "Pristrend (3h)",
|
"name": "Pristrend (3h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Kraftigt stigande",
|
||||||
"rising": "Stigande",
|
"rising": "Stigande",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallande",
|
"falling": "Fallande",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Kraftigt fallande"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_4h": {
|
"price_trend_4h": {
|
||||||
"name": "Pristrend (4h)",
|
"name": "Pristrend (4h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Kraftigt stigande",
|
||||||
"rising": "Stigande",
|
"rising": "Stigande",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallande",
|
"falling": "Fallande",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Kraftigt fallande"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_5h": {
|
"price_trend_5h": {
|
||||||
"name": "Pristrend (5h)",
|
"name": "Pristrend (5h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Kraftigt stigande",
|
||||||
"rising": "Stigande",
|
"rising": "Stigande",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallande",
|
"falling": "Fallande",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Kraftigt fallande"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_6h": {
|
"price_trend_6h": {
|
||||||
"name": "Pristrend (6h)",
|
"name": "Pristrend (6h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Kraftigt stigande",
|
||||||
"rising": "Stigande",
|
"rising": "Stigande",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallande",
|
"falling": "Fallande",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Kraftigt fallande"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_8h": {
|
"price_trend_8h": {
|
||||||
"name": "Pristrend (8h)",
|
"name": "Pristrend (8h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Kraftigt stigande",
|
||||||
"rising": "Stigande",
|
"rising": "Stigande",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallande",
|
"falling": "Fallande",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Kraftigt fallande"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_12h": {
|
"price_trend_12h": {
|
||||||
"name": "Pristrend (12h)",
|
"name": "Pristrend (12h)",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Kraftigt stigande",
|
||||||
"rising": "Stigande",
|
"rising": "Stigande",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallande",
|
"falling": "Fallande",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Kraftigt fallande"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"current_price_trend": {
|
"current_price_trend": {
|
||||||
"name": "Aktuell pristrend",
|
"name": "Aktuell pristrend",
|
||||||
"state": {
|
"state": {
|
||||||
|
"strongly_rising": "Kraftigt stigande",
|
||||||
"rising": "Stigande",
|
"rising": "Stigande",
|
||||||
|
"stable": "Stabil",
|
||||||
"falling": "Fallande",
|
"falling": "Fallande",
|
||||||
"stable": "Stabil"
|
"strongly_falling": "Kraftigt fallande"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"next_price_trend_change": {
|
"next_price_trend_change": {
|
||||||
|
|
@ -844,6 +902,52 @@
|
||||||
"realtime_consumption_enabled": {
|
"realtime_consumption_enabled": {
|
||||||
"name": "Realtidsförbrukning aktiverad"
|
"name": "Realtidsförbrukning aktiverad"
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
"number": {
|
||||||
|
"best_price_flex_override": {
|
||||||
|
"name": "Bästa pris: Flexibilitet"
|
||||||
|
},
|
||||||
|
"best_price_min_distance_override": {
|
||||||
|
"name": "Bästa pris: Minimiavstånd"
|
||||||
|
},
|
||||||
|
"best_price_min_period_length_override": {
|
||||||
|
"name": "Bästa pris: Minsta periodlängd"
|
||||||
|
},
|
||||||
|
"best_price_min_periods_override": {
|
||||||
|
"name": "Bästa pris: Minsta antal perioder"
|
||||||
|
},
|
||||||
|
"best_price_relaxation_attempts_override": {
|
||||||
|
"name": "Bästa pris: Lättnadsförsök"
|
||||||
|
},
|
||||||
|
"best_price_gap_count_override": {
|
||||||
|
"name": "Bästa pris: Glaptolerans"
|
||||||
|
},
|
||||||
|
"peak_price_flex_override": {
|
||||||
|
"name": "Topppris: Flexibilitet"
|
||||||
|
},
|
||||||
|
"peak_price_min_distance_override": {
|
||||||
|
"name": "Topppris: Minimiavstånd"
|
||||||
|
},
|
||||||
|
"peak_price_min_period_length_override": {
|
||||||
|
"name": "Topppris: Minsta periodlängd"
|
||||||
|
},
|
||||||
|
"peak_price_min_periods_override": {
|
||||||
|
"name": "Topppris: Minsta antal perioder"
|
||||||
|
},
|
||||||
|
"peak_price_relaxation_attempts_override": {
|
||||||
|
"name": "Topppris: Lättnadsförsök"
|
||||||
|
},
|
||||||
|
"peak_price_gap_count_override": {
|
||||||
|
"name": "Topppris: Glaptolerans"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"switch": {
|
||||||
|
"best_price_enable_relaxation_override": {
|
||||||
|
"name": "Bästa pris: Uppnå minimiantal"
|
||||||
|
},
|
||||||
|
"peak_price_enable_relaxation_override": {
|
||||||
|
"name": "Topppris: Uppnå minimiantal"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"issues": {
|
"issues": {
|
||||||
|
|
@ -906,6 +1010,14 @@
|
||||||
"highlight_best_price": {
|
"highlight_best_price": {
|
||||||
"name": "Markera bästa prisperioder",
|
"name": "Markera bästa prisperioder",
|
||||||
"description": "Lägg till ett halvtransparent grönt överlag för att markera de bästa prisperioderna i diagrammet. Detta gör det enkelt att visuellt identifiera de optimala tiderna för energiförbrukning."
|
"description": "Lägg till ett halvtransparent grönt överlag för att markera de bästa prisperioderna i diagrammet. Detta gör det enkelt att visuellt identifiera de optimala tiderna för energiförbrukning."
|
||||||
|
},
|
||||||
|
"highlight_peak_price": {
|
||||||
|
"name": "Markera högsta prisperioder",
|
||||||
|
"description": "Lägg till ett halvtransparent rött överlag för att markera de högsta prisperioderna i diagrammet. Detta gör det enkelt att visuellt identifiera tiderna när energi är som dyrast."
|
||||||
|
},
|
||||||
|
"resolution": {
|
||||||
|
"name": "Upplösning",
|
||||||
|
"description": "Tidsupplösning för diagramdata. 'interval' (standard): Ursprungliga 15-minutersintervall (96 punkter per dag). 'hourly': Aggregerade timvärden med ett rullande 60-minutersfönster (24 punkter per dag) för ett renare och mindre rörigt diagram."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|
|
||||||
|
|
@ -32,11 +32,13 @@ from .price import (
|
||||||
aggregate_period_ratings,
|
aggregate_period_ratings,
|
||||||
aggregate_price_levels,
|
aggregate_price_levels,
|
||||||
aggregate_price_rating,
|
aggregate_price_rating,
|
||||||
|
calculate_coefficient_of_variation,
|
||||||
calculate_difference_percentage,
|
calculate_difference_percentage,
|
||||||
calculate_price_trend,
|
calculate_price_trend,
|
||||||
calculate_rating_level,
|
calculate_rating_level,
|
||||||
calculate_trailing_average_for_interval,
|
calculate_trailing_average_for_interval,
|
||||||
calculate_volatility_level,
|
calculate_volatility_level,
|
||||||
|
calculate_volatility_with_cv,
|
||||||
enrich_price_info_with_differences,
|
enrich_price_info_with_differences,
|
||||||
find_price_data_for_interval,
|
find_price_data_for_interval,
|
||||||
)
|
)
|
||||||
|
|
@ -46,6 +48,7 @@ __all__ = [
|
||||||
"aggregate_period_ratings",
|
"aggregate_period_ratings",
|
||||||
"aggregate_price_levels",
|
"aggregate_price_levels",
|
||||||
"aggregate_price_rating",
|
"aggregate_price_rating",
|
||||||
|
"calculate_coefficient_of_variation",
|
||||||
"calculate_current_leading_max",
|
"calculate_current_leading_max",
|
||||||
"calculate_current_leading_mean",
|
"calculate_current_leading_mean",
|
||||||
"calculate_current_leading_min",
|
"calculate_current_leading_min",
|
||||||
|
|
@ -60,6 +63,7 @@ __all__ = [
|
||||||
"calculate_rating_level",
|
"calculate_rating_level",
|
||||||
"calculate_trailing_average_for_interval",
|
"calculate_trailing_average_for_interval",
|
||||||
"calculate_volatility_level",
|
"calculate_volatility_level",
|
||||||
|
"calculate_volatility_with_cv",
|
||||||
"enrich_price_info_with_differences",
|
"enrich_price_info_with_differences",
|
||||||
"find_price_data_for_interval",
|
"find_price_data_for_interval",
|
||||||
]
|
]
|
||||||
|
|
|
||||||
|
|
@ -11,12 +11,21 @@ if TYPE_CHECKING:
|
||||||
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import (
|
from custom_components.tibber_prices.const import (
|
||||||
|
DEFAULT_PRICE_LEVEL_GAP_TOLERANCE,
|
||||||
|
DEFAULT_PRICE_RATING_GAP_TOLERANCE,
|
||||||
|
DEFAULT_PRICE_RATING_HYSTERESIS,
|
||||||
DEFAULT_VOLATILITY_THRESHOLD_HIGH,
|
DEFAULT_VOLATILITY_THRESHOLD_HIGH,
|
||||||
DEFAULT_VOLATILITY_THRESHOLD_MODERATE,
|
DEFAULT_VOLATILITY_THRESHOLD_MODERATE,
|
||||||
DEFAULT_VOLATILITY_THRESHOLD_VERY_HIGH,
|
DEFAULT_VOLATILITY_THRESHOLD_VERY_HIGH,
|
||||||
PRICE_LEVEL_MAPPING,
|
PRICE_LEVEL_MAPPING,
|
||||||
PRICE_LEVEL_NORMAL,
|
PRICE_LEVEL_NORMAL,
|
||||||
PRICE_RATING_NORMAL,
|
PRICE_RATING_NORMAL,
|
||||||
|
PRICE_TREND_FALLING,
|
||||||
|
PRICE_TREND_MAPPING,
|
||||||
|
PRICE_TREND_RISING,
|
||||||
|
PRICE_TREND_STABLE,
|
||||||
|
PRICE_TREND_STRONGLY_FALLING,
|
||||||
|
PRICE_TREND_STRONGLY_RISING,
|
||||||
VOLATILITY_HIGH,
|
VOLATILITY_HIGH,
|
||||||
VOLATILITY_LOW,
|
VOLATILITY_LOW,
|
||||||
VOLATILITY_MODERATE,
|
VOLATILITY_MODERATE,
|
||||||
|
|
@ -44,6 +53,91 @@ VOLATILITY_FACTOR_NORMAL = 1.0 # Moderate volatility → baseline
|
||||||
VOLATILITY_FACTOR_INSENSITIVE = 1.4 # High volatility → noise filtering
|
VOLATILITY_FACTOR_INSENSITIVE = 1.4 # High volatility → noise filtering
|
||||||
|
|
||||||
|
|
||||||
|
def calculate_coefficient_of_variation(prices: list[float]) -> float | None:
|
||||||
|
"""
|
||||||
|
Calculate coefficient of variation (CV) from price list.
|
||||||
|
|
||||||
|
CV = (std_dev / mean) * 100, expressed as percentage.
|
||||||
|
This is a standardized measure of volatility that works across different
|
||||||
|
price levels and period lengths.
|
||||||
|
|
||||||
|
Used by:
|
||||||
|
- Volatility sensors (via calculate_volatility_with_cv)
|
||||||
|
- Outlier filtering (adaptive confidence level)
|
||||||
|
- Period statistics
|
||||||
|
|
||||||
|
Args:
|
||||||
|
prices: List of price values (in any unit)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
CV as percentage (e.g., 15.0 for 15%), or None if calculation not possible
|
||||||
|
(fewer than 2 prices or mean is zero)
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
- CV ~5-10%: Very stable prices
|
||||||
|
- CV ~15-20%: Moderate variation
|
||||||
|
- CV ~30-50%: High volatility
|
||||||
|
- CV >50%: Extreme volatility
|
||||||
|
|
||||||
|
"""
|
||||||
|
if len(prices) < MIN_PRICES_FOR_VOLATILITY:
|
||||||
|
return None
|
||||||
|
|
||||||
|
mean = statistics.mean(prices)
|
||||||
|
if mean == 0:
|
||||||
|
return None
|
||||||
|
|
||||||
|
std_dev = statistics.stdev(prices)
|
||||||
|
# Use abs(mean) for negative prices (Norway/Germany electricity markets)
|
||||||
|
return (std_dev / abs(mean)) * 100
|
||||||
|
|
||||||
|
|
||||||
|
def calculate_volatility_with_cv(
|
||||||
|
prices: list[float],
|
||||||
|
threshold_moderate: float | None = None,
|
||||||
|
threshold_high: float | None = None,
|
||||||
|
threshold_very_high: float | None = None,
|
||||||
|
) -> tuple[str, float | None]:
|
||||||
|
"""
|
||||||
|
Calculate volatility level AND coefficient of variation from price list.
|
||||||
|
|
||||||
|
Returns both the level string (for sensor state) and the numeric CV value
|
||||||
|
(for sensor attributes), allowing users to see the exact volatility percentage.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
prices: List of price values (in any unit)
|
||||||
|
threshold_moderate: Custom threshold for MODERATE level
|
||||||
|
threshold_high: Custom threshold for HIGH level
|
||||||
|
threshold_very_high: Custom threshold for VERY_HIGH level
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Tuple of (level, cv):
|
||||||
|
- level: "LOW", "MODERATE", "HIGH", or "VERY_HIGH" (uppercase)
|
||||||
|
- cv: Coefficient of variation as percentage (e.g., 15.0), or None if not calculable
|
||||||
|
|
||||||
|
"""
|
||||||
|
cv = calculate_coefficient_of_variation(prices)
|
||||||
|
if cv is None:
|
||||||
|
return VOLATILITY_LOW, None
|
||||||
|
|
||||||
|
# Use provided thresholds or fall back to constants
|
||||||
|
t_moderate = threshold_moderate if threshold_moderate is not None else DEFAULT_VOLATILITY_THRESHOLD_MODERATE
|
||||||
|
t_high = threshold_high if threshold_high is not None else DEFAULT_VOLATILITY_THRESHOLD_HIGH
|
||||||
|
t_very_high = threshold_very_high if threshold_very_high is not None else DEFAULT_VOLATILITY_THRESHOLD_VERY_HIGH
|
||||||
|
|
||||||
|
# Classify based on thresholds
|
||||||
|
if cv < t_moderate:
|
||||||
|
level = VOLATILITY_LOW
|
||||||
|
elif cv < t_high:
|
||||||
|
level = VOLATILITY_MODERATE
|
||||||
|
elif cv < t_very_high:
|
||||||
|
level = VOLATILITY_HIGH
|
||||||
|
else:
|
||||||
|
level = VOLATILITY_VERY_HIGH
|
||||||
|
|
||||||
|
return level, cv
|
||||||
|
|
||||||
|
|
||||||
def calculate_volatility_level(
|
def calculate_volatility_level(
|
||||||
prices: list[float],
|
prices: list[float],
|
||||||
threshold_moderate: float | None = None,
|
threshold_moderate: float | None = None,
|
||||||
|
|
@ -78,34 +172,8 @@ def calculate_volatility_level(
|
||||||
Works identically for short periods (2-3 intervals) and long periods (96 intervals/day).
|
Works identically for short periods (2-3 intervals) and long periods (96 intervals/day).
|
||||||
|
|
||||||
"""
|
"""
|
||||||
# Need at least 2 values for standard deviation
|
level, _cv = calculate_volatility_with_cv(prices, threshold_moderate, threshold_high, threshold_very_high)
|
||||||
if len(prices) < MIN_PRICES_FOR_VOLATILITY:
|
return level
|
||||||
return VOLATILITY_LOW
|
|
||||||
|
|
||||||
# Use provided thresholds or fall back to constants
|
|
||||||
t_moderate = threshold_moderate if threshold_moderate is not None else DEFAULT_VOLATILITY_THRESHOLD_MODERATE
|
|
||||||
t_high = threshold_high if threshold_high is not None else DEFAULT_VOLATILITY_THRESHOLD_HIGH
|
|
||||||
t_very_high = threshold_very_high if threshold_very_high is not None else DEFAULT_VOLATILITY_THRESHOLD_VERY_HIGH
|
|
||||||
|
|
||||||
# Calculate coefficient of variation
|
|
||||||
# CRITICAL: Use absolute value of mean for negative prices (Norway/Germany)
|
|
||||||
# Negative electricity prices are valid and should have measurable volatility
|
|
||||||
mean = statistics.mean(prices)
|
|
||||||
if mean == 0:
|
|
||||||
# Division by zero case (all prices exactly zero)
|
|
||||||
return VOLATILITY_LOW
|
|
||||||
|
|
||||||
std_dev = statistics.stdev(prices)
|
|
||||||
coefficient_of_variation = (std_dev / abs(mean)) * 100 # As percentage, use abs(mean)
|
|
||||||
|
|
||||||
# Classify based on thresholds
|
|
||||||
if coefficient_of_variation < t_moderate:
|
|
||||||
return VOLATILITY_LOW
|
|
||||||
if coefficient_of_variation < t_high:
|
|
||||||
return VOLATILITY_MODERATE
|
|
||||||
if coefficient_of_variation < t_very_high:
|
|
||||||
return VOLATILITY_HIGH
|
|
||||||
return VOLATILITY_VERY_HIGH
|
|
||||||
|
|
||||||
|
|
||||||
def calculate_trailing_average_for_interval(
|
def calculate_trailing_average_for_interval(
|
||||||
|
|
@ -186,27 +254,41 @@ def calculate_difference_percentage(
|
||||||
return ((current_interval_price - trailing_average) / abs(trailing_average)) * 100
|
return ((current_interval_price - trailing_average) / abs(trailing_average)) * 100
|
||||||
|
|
||||||
|
|
||||||
def calculate_rating_level(
|
def calculate_rating_level( # noqa: PLR0911 - Multiple returns justified by clear hysteresis state machine
|
||||||
difference: float | None,
|
difference: float | None,
|
||||||
threshold_low: float,
|
threshold_low: float,
|
||||||
threshold_high: float,
|
threshold_high: float,
|
||||||
|
*,
|
||||||
|
previous_rating: str | None = None,
|
||||||
|
hysteresis: float = 0.0,
|
||||||
) -> str | None:
|
) -> str | None:
|
||||||
"""
|
"""
|
||||||
Calculate the rating level based on difference percentage and thresholds.
|
Calculate the rating level based on difference percentage and thresholds.
|
||||||
|
|
||||||
This mimics the API's "level" field from priceRating endpoint.
|
This mimics the API's "level" field from priceRating endpoint.
|
||||||
|
|
||||||
|
Supports hysteresis to prevent flickering at threshold boundaries. When a previous
|
||||||
|
rating is provided, the threshold for leaving that state is adjusted by the
|
||||||
|
hysteresis value, requiring a more significant change to switch states.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
difference: The difference percentage (from calculate_difference_percentage)
|
difference: The difference percentage (from calculate_difference_percentage)
|
||||||
threshold_low: The low threshold percentage (typically -100 to 0)
|
threshold_low: The low threshold percentage (typically -100 to 0)
|
||||||
threshold_high: The high threshold percentage (typically 0 to 100)
|
threshold_high: The high threshold percentage (typically 0 to 100)
|
||||||
|
previous_rating: The rating level of the previous interval (for hysteresis)
|
||||||
|
hysteresis: The hysteresis percentage (default 0.0 = no hysteresis)
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
"LOW" if difference <= threshold_low
|
"LOW" if difference <= threshold_low (adjusted by hysteresis)
|
||||||
"HIGH" if difference >= threshold_high
|
"HIGH" if difference >= threshold_high (adjusted by hysteresis)
|
||||||
"NORMAL" otherwise
|
"NORMAL" otherwise
|
||||||
None if difference is None
|
None if difference is None
|
||||||
|
|
||||||
|
Example with hysteresis=2.0 and threshold_low=-10:
|
||||||
|
- To enter LOW from NORMAL: difference must be <= -10% (threshold_low)
|
||||||
|
- To leave LOW back to NORMAL: difference must be > -8% (threshold_low + hysteresis)
|
||||||
|
This creates a "dead zone" that prevents rapid switching at boundaries.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
if difference is None:
|
if difference is None:
|
||||||
return None
|
return None
|
||||||
|
|
@ -222,7 +304,29 @@ def calculate_rating_level(
|
||||||
)
|
)
|
||||||
return PRICE_RATING_NORMAL
|
return PRICE_RATING_NORMAL
|
||||||
|
|
||||||
# Classify based on thresholds
|
# Apply hysteresis based on previous state
|
||||||
|
# The idea: make it "harder" to leave the current state than to enter it
|
||||||
|
if previous_rating == "LOW":
|
||||||
|
# Currently LOW: need to exceed threshold_low + hysteresis to leave
|
||||||
|
exit_threshold_low = threshold_low + hysteresis
|
||||||
|
if difference <= exit_threshold_low:
|
||||||
|
return "LOW"
|
||||||
|
# Check if we should go to HIGH (rare, but possible with large price swings)
|
||||||
|
if difference >= threshold_high:
|
||||||
|
return "HIGH"
|
||||||
|
return PRICE_RATING_NORMAL
|
||||||
|
|
||||||
|
if previous_rating == "HIGH":
|
||||||
|
# Currently HIGH: need to drop below threshold_high - hysteresis to leave
|
||||||
|
exit_threshold_high = threshold_high - hysteresis
|
||||||
|
if difference >= exit_threshold_high:
|
||||||
|
return "HIGH"
|
||||||
|
# Check if we should go to LOW (rare, but possible with large price swings)
|
||||||
|
if difference <= threshold_low:
|
||||||
|
return "LOW"
|
||||||
|
return PRICE_RATING_NORMAL
|
||||||
|
|
||||||
|
# No previous state or previous was NORMAL: use standard thresholds
|
||||||
if difference <= threshold_low:
|
if difference <= threshold_low:
|
||||||
return "LOW"
|
return "LOW"
|
||||||
|
|
||||||
|
|
@ -232,12 +336,15 @@ def calculate_rating_level(
|
||||||
return PRICE_RATING_NORMAL
|
return PRICE_RATING_NORMAL
|
||||||
|
|
||||||
|
|
||||||
def _process_price_interval(
|
def _process_price_interval( # noqa: PLR0913 - Extra params needed for hysteresis
|
||||||
price_interval: dict[str, Any],
|
price_interval: dict[str, Any],
|
||||||
all_prices: list[dict[str, Any]],
|
all_prices: list[dict[str, Any]],
|
||||||
threshold_low: float,
|
threshold_low: float,
|
||||||
threshold_high: float,
|
threshold_high: float,
|
||||||
) -> None:
|
*,
|
||||||
|
previous_rating: str | None = None,
|
||||||
|
hysteresis: float = 0.0,
|
||||||
|
) -> str | None:
|
||||||
"""
|
"""
|
||||||
Process a single price interval and add difference and rating_level.
|
Process a single price interval and add difference and rating_level.
|
||||||
|
|
||||||
|
|
@ -246,16 +353,20 @@ def _process_price_interval(
|
||||||
all_prices: All available price intervals for lookback calculation
|
all_prices: All available price intervals for lookback calculation
|
||||||
threshold_low: Low threshold percentage
|
threshold_low: Low threshold percentage
|
||||||
threshold_high: High threshold percentage
|
threshold_high: High threshold percentage
|
||||||
day_label: Label for logging ("today" or "tomorrow")
|
previous_rating: The rating level of the previous interval (for hysteresis)
|
||||||
|
hysteresis: The hysteresis percentage to prevent flickering
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
The calculated rating_level (for use as previous_rating in next call)
|
||||||
|
|
||||||
"""
|
"""
|
||||||
starts_at = price_interval.get("startsAt") # Already datetime object in local timezone
|
starts_at = price_interval.get("startsAt") # Already datetime object in local timezone
|
||||||
if not starts_at:
|
if not starts_at:
|
||||||
return
|
return previous_rating
|
||||||
current_interval_price = price_interval.get("total")
|
current_interval_price = price_interval.get("total")
|
||||||
|
|
||||||
if current_interval_price is None:
|
if current_interval_price is None:
|
||||||
return
|
return previous_rating
|
||||||
|
|
||||||
# Calculate trailing average
|
# Calculate trailing average
|
||||||
trailing_avg = calculate_trailing_average_for_interval(starts_at, all_prices)
|
trailing_avg = calculate_trailing_average_for_interval(starts_at, all_prices)
|
||||||
|
|
@ -265,20 +376,398 @@ def _process_price_interval(
|
||||||
difference = calculate_difference_percentage(float(current_interval_price), trailing_avg)
|
difference = calculate_difference_percentage(float(current_interval_price), trailing_avg)
|
||||||
price_interval["difference"] = difference
|
price_interval["difference"] = difference
|
||||||
|
|
||||||
# Calculate rating_level based on difference
|
# Calculate rating_level based on difference with hysteresis
|
||||||
rating_level = calculate_rating_level(difference, threshold_low, threshold_high)
|
rating_level = calculate_rating_level(
|
||||||
|
difference,
|
||||||
|
threshold_low,
|
||||||
|
threshold_high,
|
||||||
|
previous_rating=previous_rating,
|
||||||
|
hysteresis=hysteresis,
|
||||||
|
)
|
||||||
price_interval["rating_level"] = rating_level
|
price_interval["rating_level"] = rating_level
|
||||||
else:
|
return rating_level
|
||||||
# Set to None if we couldn't calculate (expected for intervals in first 24h)
|
|
||||||
price_interval["difference"] = None
|
# Set to None if we couldn't calculate (expected for intervals in first 24h)
|
||||||
price_interval["rating_level"] = None
|
price_interval["difference"] = None
|
||||||
|
price_interval["rating_level"] = None
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
def enrich_price_info_with_differences(
|
def _build_rating_blocks(
|
||||||
|
rated_intervals: list[tuple[int, dict[str, Any], str]],
|
||||||
|
) -> list[tuple[int, int, str, int]]:
|
||||||
|
"""
|
||||||
|
Build list of contiguous rating blocks from rated intervals.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
rated_intervals: List of (original_idx, interval_dict, rating) tuples
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of (start_idx, end_idx, rating, length) tuples where indices
|
||||||
|
refer to positions in rated_intervals
|
||||||
|
|
||||||
|
"""
|
||||||
|
blocks: list[tuple[int, int, str, int]] = []
|
||||||
|
if not rated_intervals:
|
||||||
|
return blocks
|
||||||
|
|
||||||
|
block_start = 0
|
||||||
|
current_rating = rated_intervals[0][2]
|
||||||
|
|
||||||
|
for idx in range(1, len(rated_intervals)):
|
||||||
|
if rated_intervals[idx][2] != current_rating:
|
||||||
|
# End current block
|
||||||
|
blocks.append((block_start, idx - 1, current_rating, idx - block_start))
|
||||||
|
block_start = idx
|
||||||
|
current_rating = rated_intervals[idx][2]
|
||||||
|
|
||||||
|
# Don't forget the last block
|
||||||
|
blocks.append((block_start, len(rated_intervals) - 1, current_rating, len(rated_intervals) - block_start))
|
||||||
|
return blocks
|
||||||
|
|
||||||
|
|
||||||
|
def _build_level_blocks(
|
||||||
|
level_intervals: list[tuple[int, dict[str, Any], str]],
|
||||||
|
) -> list[tuple[int, int, str, int]]:
|
||||||
|
"""
|
||||||
|
Build list of contiguous price level blocks from intervals.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
level_intervals: List of (original_idx, interval_dict, level) tuples
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of (start_idx, end_idx, level, length) tuples where indices
|
||||||
|
refer to positions in level_intervals
|
||||||
|
|
||||||
|
"""
|
||||||
|
blocks: list[tuple[int, int, str, int]] = []
|
||||||
|
if not level_intervals:
|
||||||
|
return blocks
|
||||||
|
|
||||||
|
block_start = 0
|
||||||
|
current_level = level_intervals[0][2]
|
||||||
|
|
||||||
|
for idx in range(1, len(level_intervals)):
|
||||||
|
if level_intervals[idx][2] != current_level:
|
||||||
|
# End current block
|
||||||
|
blocks.append((block_start, idx - 1, current_level, idx - block_start))
|
||||||
|
block_start = idx
|
||||||
|
current_level = level_intervals[idx][2]
|
||||||
|
|
||||||
|
# Don't forget the last block
|
||||||
|
blocks.append((block_start, len(level_intervals) - 1, current_level, len(level_intervals) - block_start))
|
||||||
|
return blocks
|
||||||
|
|
||||||
|
|
||||||
|
def _calculate_gravitational_pull(
|
||||||
|
blocks: list[tuple[int, int, str, int]],
|
||||||
|
block_idx: int,
|
||||||
|
direction: str,
|
||||||
|
gap_tolerance: int,
|
||||||
|
) -> tuple[int, str]:
|
||||||
|
"""
|
||||||
|
Calculate "gravitational pull" from neighboring blocks in one direction.
|
||||||
|
|
||||||
|
This finds the first LARGE block (> gap_tolerance) in the given direction
|
||||||
|
and returns its size and rating. Small intervening blocks are "looked through".
|
||||||
|
|
||||||
|
This approach ensures that small isolated blocks are always pulled toward
|
||||||
|
the dominant large block, even if there are other small blocks in between.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
blocks: List of (start_idx, end_idx, rating, length) tuples
|
||||||
|
block_idx: Index of the current block being evaluated
|
||||||
|
direction: "left" or "right"
|
||||||
|
gap_tolerance: Maximum size of blocks considered "small"
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Tuple of (size, rating) of the first large block found,
|
||||||
|
or (immediate_neighbor_size, immediate_neighbor_rating) if no large block exists
|
||||||
|
|
||||||
|
"""
|
||||||
|
probe_range = range(block_idx - 1, -1, -1) if direction == "left" else range(block_idx + 1, len(blocks))
|
||||||
|
total_small_accumulated = 0
|
||||||
|
|
||||||
|
for probe_idx in probe_range:
|
||||||
|
probe_rating = blocks[probe_idx][2]
|
||||||
|
probe_size = blocks[probe_idx][3]
|
||||||
|
|
||||||
|
if probe_size > gap_tolerance:
|
||||||
|
# Found a large block - return its characteristics
|
||||||
|
# Add any accumulated small blocks of the same rating
|
||||||
|
if total_small_accumulated > 0:
|
||||||
|
return (probe_size + total_small_accumulated, probe_rating)
|
||||||
|
return (probe_size, probe_rating)
|
||||||
|
|
||||||
|
# Small block - accumulate if same rating as what we've seen
|
||||||
|
total_small_accumulated += probe_size
|
||||||
|
|
||||||
|
# No large block found - return the immediate neighbor's info
|
||||||
|
neighbor_idx = block_idx - 1 if direction == "left" else block_idx + 1
|
||||||
|
return (blocks[neighbor_idx][3], blocks[neighbor_idx][2])
|
||||||
|
|
||||||
|
|
||||||
|
def _apply_rating_gap_tolerance(
|
||||||
|
all_intervals: list[dict[str, Any]],
|
||||||
|
gap_tolerance: int,
|
||||||
|
) -> None:
|
||||||
|
"""
|
||||||
|
Apply gap tolerance to smooth out isolated rating level changes.
|
||||||
|
|
||||||
|
This is a post-processing step after hysteresis. It identifies short sequences
|
||||||
|
of intervals (≤ gap_tolerance) and merges them into the larger neighboring block.
|
||||||
|
The algorithm is bidirectional - it compares block sizes on both sides and
|
||||||
|
assigns the small block to whichever neighbor is larger.
|
||||||
|
|
||||||
|
This matches human intuition: a single "different" interval feels like it
|
||||||
|
should belong to the larger surrounding group.
|
||||||
|
|
||||||
|
Example with gap_tolerance=1:
|
||||||
|
LOW LOW LOW NORMAL LOW LOW → LOW LOW LOW LOW LOW LOW
|
||||||
|
(single NORMAL gets merged into larger LOW block)
|
||||||
|
|
||||||
|
Example with gap_tolerance=1 (bidirectional):
|
||||||
|
NORMAL NORMAL HIGH NORMAL HIGH HIGH HIGH → NORMAL NORMAL HIGH HIGH HIGH HIGH HIGH
|
||||||
|
(single NORMAL at position 4 gets merged into larger HIGH block on the right)
|
||||||
|
|
||||||
|
Args:
|
||||||
|
all_intervals: List of price intervals with rating_level already set (modified in-place)
|
||||||
|
gap_tolerance: Maximum number of consecutive "different" intervals to smooth out
|
||||||
|
|
||||||
|
Note:
|
||||||
|
- Compares block sizes on both sides and merges small blocks into larger neighbors
|
||||||
|
- If both neighbors have equal size, prefers the LEFT neighbor (earlier in time)
|
||||||
|
- Skips intervals without rating_level (None)
|
||||||
|
- Intervals must be sorted chronologically for this to work correctly
|
||||||
|
- Multiple passes may be needed as merging can create new small blocks
|
||||||
|
|
||||||
|
"""
|
||||||
|
if gap_tolerance <= 0:
|
||||||
|
return
|
||||||
|
|
||||||
|
# Extract intervals with valid rating_level in chronological order
|
||||||
|
rated_intervals: list[tuple[int, dict[str, Any], str]] = [
|
||||||
|
(i, interval, interval["rating_level"])
|
||||||
|
for i, interval in enumerate(all_intervals)
|
||||||
|
if interval.get("rating_level") is not None
|
||||||
|
]
|
||||||
|
|
||||||
|
if len(rated_intervals) < 3: # noqa: PLR2004 - Minimum 3 for before/gap/after pattern
|
||||||
|
return
|
||||||
|
|
||||||
|
# Iteratively merge small blocks until no more changes
|
||||||
|
max_iterations = 10
|
||||||
|
total_corrections = 0
|
||||||
|
|
||||||
|
for iteration in range(max_iterations):
|
||||||
|
blocks = _build_rating_blocks(rated_intervals)
|
||||||
|
corrections_this_pass = _merge_small_blocks(blocks, rated_intervals, gap_tolerance)
|
||||||
|
total_corrections += corrections_this_pass
|
||||||
|
|
||||||
|
if corrections_this_pass == 0:
|
||||||
|
break
|
||||||
|
|
||||||
|
_LOGGER.debug(
|
||||||
|
"Gap tolerance pass %d: merged %d small blocks",
|
||||||
|
iteration + 1,
|
||||||
|
corrections_this_pass,
|
||||||
|
)
|
||||||
|
|
||||||
|
if total_corrections > 0:
|
||||||
|
_LOGGER.debug("Gap tolerance: total %d block merges across all passes", total_corrections)
|
||||||
|
|
||||||
|
|
||||||
|
def _apply_level_gap_tolerance(
|
||||||
|
all_intervals: list[dict[str, Any]],
|
||||||
|
gap_tolerance: int,
|
||||||
|
) -> None:
|
||||||
|
"""
|
||||||
|
Apply gap tolerance to smooth out isolated price level changes.
|
||||||
|
|
||||||
|
Similar to rating gap tolerance, but operates on Tibber's "level" field
|
||||||
|
(VERY_CHEAP, CHEAP, NORMAL, EXPENSIVE, VERY_EXPENSIVE). Identifies short
|
||||||
|
sequences of intervals (≤ gap_tolerance) and merges them into the larger
|
||||||
|
neighboring block.
|
||||||
|
|
||||||
|
Example with gap_tolerance=1:
|
||||||
|
CHEAP CHEAP CHEAP NORMAL CHEAP CHEAP → CHEAP CHEAP CHEAP CHEAP CHEAP CHEAP
|
||||||
|
(single NORMAL gets merged into larger CHEAP block)
|
||||||
|
|
||||||
|
Example with gap_tolerance=1 (bidirectional):
|
||||||
|
NORMAL NORMAL EXPENSIVE NORMAL EXPENSIVE EXPENSIVE EXPENSIVE →
|
||||||
|
NORMAL NORMAL EXPENSIVE EXPENSIVE EXPENSIVE EXPENSIVE EXPENSIVE
|
||||||
|
(single NORMAL at position 4 gets merged into larger EXPENSIVE block on the right)
|
||||||
|
|
||||||
|
Args:
|
||||||
|
all_intervals: List of price intervals with level already set (modified in-place)
|
||||||
|
gap_tolerance: Maximum number of consecutive "different" intervals to smooth out
|
||||||
|
|
||||||
|
Note:
|
||||||
|
- Uses same bidirectional algorithm as rating gap tolerance
|
||||||
|
- Compares block sizes on both sides and merges small blocks into larger neighbors
|
||||||
|
- If both neighbors have equal size, prefers the LEFT neighbor (earlier in time)
|
||||||
|
- Skips intervals without level (None)
|
||||||
|
- Intervals must be sorted chronologically for this to work correctly
|
||||||
|
- Multiple passes may be needed as merging can create new small blocks
|
||||||
|
|
||||||
|
"""
|
||||||
|
if gap_tolerance <= 0:
|
||||||
|
return
|
||||||
|
|
||||||
|
# Extract intervals with valid level in chronological order
|
||||||
|
level_intervals: list[tuple[int, dict[str, Any], str]] = [
|
||||||
|
(i, interval, interval["level"])
|
||||||
|
for i, interval in enumerate(all_intervals)
|
||||||
|
if interval.get("level") is not None
|
||||||
|
]
|
||||||
|
|
||||||
|
if len(level_intervals) < 3: # noqa: PLR2004 - Minimum 3 for before/gap/after pattern
|
||||||
|
return
|
||||||
|
|
||||||
|
# Iteratively merge small blocks until no more changes
|
||||||
|
max_iterations = 10
|
||||||
|
total_corrections = 0
|
||||||
|
|
||||||
|
for iteration in range(max_iterations):
|
||||||
|
blocks = _build_level_blocks(level_intervals)
|
||||||
|
corrections_this_pass = _merge_small_level_blocks(blocks, level_intervals, gap_tolerance)
|
||||||
|
total_corrections += corrections_this_pass
|
||||||
|
|
||||||
|
if corrections_this_pass == 0:
|
||||||
|
break
|
||||||
|
|
||||||
|
_LOGGER.debug(
|
||||||
|
"Level gap tolerance pass %d: merged %d small blocks",
|
||||||
|
iteration + 1,
|
||||||
|
corrections_this_pass,
|
||||||
|
)
|
||||||
|
|
||||||
|
if total_corrections > 0:
|
||||||
|
_LOGGER.debug("Level gap tolerance: total %d block merges across all passes", total_corrections)
|
||||||
|
|
||||||
|
|
||||||
|
def _merge_small_blocks(
|
||||||
|
blocks: list[tuple[int, int, str, int]],
|
||||||
|
rated_intervals: list[tuple[int, dict[str, Any], str]],
|
||||||
|
gap_tolerance: int,
|
||||||
|
) -> int:
|
||||||
|
"""
|
||||||
|
Merge small blocks into their larger neighbors.
|
||||||
|
|
||||||
|
CRITICAL: This function collects ALL merge decisions FIRST, then applies them.
|
||||||
|
This prevents the order of processing from affecting outcomes. Without this,
|
||||||
|
earlier blocks could be merged incorrectly because the gravitational pull
|
||||||
|
calculation would see already-modified neighbors instead of the original state.
|
||||||
|
|
||||||
|
The merge decision is based on the FIRST LARGE BLOCK in each direction,
|
||||||
|
looking through any small intervening blocks. This ensures consistent
|
||||||
|
behavior when multiple small blocks are adjacent.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
blocks: List of (start_idx, end_idx, rating, length) tuples
|
||||||
|
rated_intervals: List of (original_idx, interval_dict, rating) tuples (modified in-place)
|
||||||
|
gap_tolerance: Maximum size of blocks to merge
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Number of blocks merged in this pass
|
||||||
|
|
||||||
|
"""
|
||||||
|
# Phase 1: Collect all merge decisions based on ORIGINAL block state
|
||||||
|
merge_decisions: list[tuple[int, int, str]] = [] # (start_ri_idx, end_ri_idx, target_rating)
|
||||||
|
|
||||||
|
for block_idx, (start, end, rating, length) in enumerate(blocks):
|
||||||
|
if length > gap_tolerance:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Must have neighbors on BOTH sides (not an edge block)
|
||||||
|
if block_idx == 0 or block_idx == len(blocks) - 1:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Calculate gravitational pull from each direction
|
||||||
|
left_pull, left_rating = _calculate_gravitational_pull(blocks, block_idx, "left", gap_tolerance)
|
||||||
|
right_pull, right_rating = _calculate_gravitational_pull(blocks, block_idx, "right", gap_tolerance)
|
||||||
|
|
||||||
|
# Determine target rating (prefer left if equal)
|
||||||
|
target_rating = left_rating if left_pull >= right_pull else right_rating
|
||||||
|
|
||||||
|
if rating != target_rating:
|
||||||
|
merge_decisions.append((start, end, target_rating))
|
||||||
|
|
||||||
|
# Phase 2: Apply all merge decisions
|
||||||
|
for start, end, target_rating in merge_decisions:
|
||||||
|
for ri_idx in range(start, end + 1):
|
||||||
|
original_idx, interval, _old_rating = rated_intervals[ri_idx]
|
||||||
|
interval["rating_level"] = target_rating
|
||||||
|
rated_intervals[ri_idx] = (original_idx, interval, target_rating)
|
||||||
|
|
||||||
|
return len(merge_decisions)
|
||||||
|
|
||||||
|
|
||||||
|
def _merge_small_level_blocks(
|
||||||
|
blocks: list[tuple[int, int, str, int]],
|
||||||
|
level_intervals: list[tuple[int, dict[str, Any], str]],
|
||||||
|
gap_tolerance: int,
|
||||||
|
) -> int:
|
||||||
|
"""
|
||||||
|
Merge small price level blocks into their larger neighbors.
|
||||||
|
|
||||||
|
CRITICAL: This function collects ALL merge decisions FIRST, then applies them.
|
||||||
|
This prevents the order of processing from affecting outcomes. Without this,
|
||||||
|
earlier blocks could be merged incorrectly because the gravitational pull
|
||||||
|
calculation would see already-modified neighbors instead of the original state.
|
||||||
|
|
||||||
|
The merge decision is based on the FIRST LARGE BLOCK in each direction,
|
||||||
|
looking through any small intervening blocks. This ensures consistent
|
||||||
|
behavior when multiple small blocks are adjacent.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
blocks: List of (start_idx, end_idx, level, length) tuples
|
||||||
|
level_intervals: List of (original_idx, interval_dict, level) tuples (modified in-place)
|
||||||
|
gap_tolerance: Maximum size of blocks to merge
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Number of blocks merged in this pass
|
||||||
|
|
||||||
|
"""
|
||||||
|
# Phase 1: Collect all merge decisions based on ORIGINAL block state
|
||||||
|
merge_decisions: list[tuple[int, int, str]] = [] # (start_li_idx, end_li_idx, target_level)
|
||||||
|
|
||||||
|
for block_idx, (start, end, level, length) in enumerate(blocks):
|
||||||
|
if length > gap_tolerance:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Must have neighbors on BOTH sides (not an edge block)
|
||||||
|
if block_idx == 0 or block_idx == len(blocks) - 1:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Calculate gravitational pull from each direction
|
||||||
|
left_pull, left_level = _calculate_gravitational_pull(blocks, block_idx, "left", gap_tolerance)
|
||||||
|
right_pull, right_level = _calculate_gravitational_pull(blocks, block_idx, "right", gap_tolerance)
|
||||||
|
|
||||||
|
# Determine target level (prefer left if equal)
|
||||||
|
target_level = left_level if left_pull >= right_pull else right_level
|
||||||
|
|
||||||
|
if level != target_level:
|
||||||
|
merge_decisions.append((start, end, target_level))
|
||||||
|
|
||||||
|
# Phase 2: Apply all merge decisions
|
||||||
|
for start, end, target_level in merge_decisions:
|
||||||
|
for li_idx in range(start, end + 1):
|
||||||
|
original_idx, interval, _old_level = level_intervals[li_idx]
|
||||||
|
interval["level"] = target_level
|
||||||
|
level_intervals[li_idx] = (original_idx, interval, target_level)
|
||||||
|
|
||||||
|
return len(merge_decisions)
|
||||||
|
|
||||||
|
|
||||||
|
def enrich_price_info_with_differences( # noqa: PLR0913 - Extra params for rating stabilization
|
||||||
all_intervals: list[dict[str, Any]],
|
all_intervals: list[dict[str, Any]],
|
||||||
*,
|
*,
|
||||||
threshold_low: float | None = None,
|
threshold_low: float | None = None,
|
||||||
threshold_high: float | None = None,
|
threshold_high: float | None = None,
|
||||||
|
hysteresis: float | None = None,
|
||||||
|
gap_tolerance: int | None = None,
|
||||||
|
level_gap_tolerance: int | None = None,
|
||||||
time: TibberPricesTimeService | None = None, # noqa: ARG001 # Used in production (via coordinator), kept for compatibility
|
time: TibberPricesTimeService | None = None, # noqa: ARG001 # Used in production (via coordinator), kept for compatibility
|
||||||
) -> list[dict[str, Any]]:
|
) -> list[dict[str, Any]]:
|
||||||
"""
|
"""
|
||||||
|
|
@ -287,15 +776,34 @@ def enrich_price_info_with_differences(
|
||||||
Computes the trailing 24-hour average, difference percentage, and rating level
|
Computes the trailing 24-hour average, difference percentage, and rating level
|
||||||
for intervals that have sufficient lookback data (in-place modification).
|
for intervals that have sufficient lookback data (in-place modification).
|
||||||
|
|
||||||
|
Uses hysteresis to prevent flickering at threshold boundaries. When an interval's
|
||||||
|
difference is near a threshold, hysteresis ensures that the rating only changes
|
||||||
|
when there's a significant movement, not just minor fluctuations.
|
||||||
|
|
||||||
|
After hysteresis, applies gap tolerance as post-processing to smooth out any
|
||||||
|
remaining isolated rating changes (e.g., a single NORMAL interval surrounded
|
||||||
|
by LOW intervals gets corrected to LOW).
|
||||||
|
|
||||||
|
Similarly, applies level gap tolerance to smooth out isolated price level changes
|
||||||
|
from Tibber's API (e.g., a single NORMAL interval surrounded by CHEAP intervals
|
||||||
|
gets corrected to CHEAP).
|
||||||
|
|
||||||
CRITICAL: Only enriches intervals that have at least 24 hours of prior data
|
CRITICAL: Only enriches intervals that have at least 24 hours of prior data
|
||||||
available. This is determined by checking if (interval_start - earliest_interval_start) >= 24h.
|
available. This is determined by checking if (interval_start - earliest_interval_start) >= 24h.
|
||||||
Works independently of interval density (24 vs 96 intervals/day) and handles
|
Works independently of interval density (24 vs 96 intervals/day) and handles
|
||||||
transition periods (e.g., Oct 1, 2025) correctly.
|
transition periods (e.g., Oct 1, 2025) correctly.
|
||||||
|
|
||||||
|
CRITICAL: Intervals are processed in chronological order to properly apply
|
||||||
|
hysteresis. The rating_level of each interval depends on the previous interval's
|
||||||
|
rating to prevent rapid switching at threshold boundaries.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
all_intervals: Flat list of all price intervals (day_before_yesterday + yesterday + today + tomorrow).
|
all_intervals: Flat list of all price intervals (day_before_yesterday + yesterday + today + tomorrow).
|
||||||
threshold_low: Low threshold percentage for rating_level (defaults to -10)
|
threshold_low: Low threshold percentage for rating_level (defaults to -10)
|
||||||
threshold_high: High threshold percentage for rating_level (defaults to 10)
|
threshold_high: High threshold percentage for rating_level (defaults to 10)
|
||||||
|
hysteresis: Hysteresis percentage to prevent flickering (defaults to 2.0)
|
||||||
|
gap_tolerance: Max consecutive intervals to smooth out for rating_level (defaults to 1, 0 = disabled)
|
||||||
|
level_gap_tolerance: Max consecutive intervals to smooth out for price level (defaults to 1, 0 = disabled)
|
||||||
time: TibberPricesTimeService instance (kept for API compatibility, not used)
|
time: TibberPricesTimeService instance (kept for API compatibility, not used)
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
|
|
@ -311,6 +819,9 @@ def enrich_price_info_with_differences(
|
||||||
"""
|
"""
|
||||||
threshold_low = threshold_low if threshold_low is not None else -10
|
threshold_low = threshold_low if threshold_low is not None else -10
|
||||||
threshold_high = threshold_high if threshold_high is not None else 10
|
threshold_high = threshold_high if threshold_high is not None else 10
|
||||||
|
hysteresis = hysteresis if hysteresis is not None else DEFAULT_PRICE_RATING_HYSTERESIS
|
||||||
|
gap_tolerance = gap_tolerance if gap_tolerance is not None else DEFAULT_PRICE_RATING_GAP_TOLERANCE
|
||||||
|
level_gap_tolerance = level_gap_tolerance if level_gap_tolerance is not None else DEFAULT_PRICE_LEVEL_GAP_TOLERANCE
|
||||||
|
|
||||||
if not all_intervals:
|
if not all_intervals:
|
||||||
return all_intervals
|
return all_intervals
|
||||||
|
|
@ -330,25 +841,47 @@ def enrich_price_info_with_differences(
|
||||||
# Only intervals starting at or after this boundary have full 24h lookback
|
# Only intervals starting at or after this boundary have full 24h lookback
|
||||||
enrichment_boundary = earliest_start + timedelta(hours=24)
|
enrichment_boundary = earliest_start + timedelta(hours=24)
|
||||||
|
|
||||||
# Process intervals (modifies in-place)
|
# CRITICAL: Sort intervals by time for proper hysteresis application
|
||||||
|
# We need to process intervals in chronological order so each interval
|
||||||
|
# can use the previous interval's rating_level for hysteresis
|
||||||
|
intervals_with_time: list[tuple[dict[str, Any], datetime]] = [
|
||||||
|
(interval, starts_at) for interval in all_intervals if (starts_at := interval.get("startsAt")) is not None
|
||||||
|
]
|
||||||
|
intervals_with_time.sort(key=lambda x: x[1])
|
||||||
|
|
||||||
|
# Process intervals in chronological order (modifies in-place)
|
||||||
# CRITICAL: Only enrich intervals that start >= 24h after earliest data
|
# CRITICAL: Only enrich intervals that start >= 24h after earliest data
|
||||||
enriched_count = 0
|
enriched_count = 0
|
||||||
skipped_count = 0
|
skipped_count = 0
|
||||||
|
previous_rating: str | None = None
|
||||||
|
|
||||||
for price_interval in all_intervals:
|
for price_interval, starts_at in intervals_with_time:
|
||||||
starts_at = price_interval.get("startsAt")
|
|
||||||
if not starts_at:
|
|
||||||
skipped_count += 1
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Skip if interval doesn't have full 24h lookback
|
# Skip if interval doesn't have full 24h lookback
|
||||||
if starts_at < enrichment_boundary:
|
if starts_at < enrichment_boundary:
|
||||||
skipped_count += 1
|
skipped_count += 1
|
||||||
continue
|
continue
|
||||||
|
|
||||||
_process_price_interval(price_interval, all_intervals, threshold_low, threshold_high)
|
# Process interval and get its rating for use as previous_rating in next iteration
|
||||||
|
previous_rating = _process_price_interval(
|
||||||
|
price_interval,
|
||||||
|
all_intervals,
|
||||||
|
threshold_low,
|
||||||
|
threshold_high,
|
||||||
|
previous_rating=previous_rating,
|
||||||
|
hysteresis=hysteresis,
|
||||||
|
)
|
||||||
enriched_count += 1
|
enriched_count += 1
|
||||||
|
|
||||||
|
# Apply gap tolerance as post-processing step
|
||||||
|
# This smooths out isolated rating changes that slip through hysteresis
|
||||||
|
if gap_tolerance > 0:
|
||||||
|
_apply_rating_gap_tolerance(all_intervals, gap_tolerance)
|
||||||
|
|
||||||
|
# Apply level gap tolerance as post-processing step
|
||||||
|
# This smooths out isolated price level changes from Tibber's API
|
||||||
|
if level_gap_tolerance > 0:
|
||||||
|
_apply_level_gap_tolerance(all_intervals, level_gap_tolerance)
|
||||||
|
|
||||||
return all_intervals
|
return all_intervals
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -603,15 +1136,27 @@ def calculate_price_trend( # noqa: PLR0913 - All parameters are necessary for v
|
||||||
threshold_rising: float = 3.0,
|
threshold_rising: float = 3.0,
|
||||||
threshold_falling: float = -3.0,
|
threshold_falling: float = -3.0,
|
||||||
*,
|
*,
|
||||||
|
threshold_strongly_rising: float = 6.0,
|
||||||
|
threshold_strongly_falling: float = -6.0,
|
||||||
volatility_adjustment: bool = True,
|
volatility_adjustment: bool = True,
|
||||||
lookahead_intervals: int | None = None,
|
lookahead_intervals: int | None = None,
|
||||||
all_intervals: list[dict[str, Any]] | None = None,
|
all_intervals: list[dict[str, Any]] | None = None,
|
||||||
volatility_threshold_moderate: float = DEFAULT_VOLATILITY_THRESHOLD_MODERATE,
|
volatility_threshold_moderate: float = DEFAULT_VOLATILITY_THRESHOLD_MODERATE,
|
||||||
volatility_threshold_high: float = DEFAULT_VOLATILITY_THRESHOLD_HIGH,
|
volatility_threshold_high: float = DEFAULT_VOLATILITY_THRESHOLD_HIGH,
|
||||||
) -> tuple[str, float]:
|
) -> tuple[str, float, int]:
|
||||||
"""
|
"""
|
||||||
Calculate price trend by comparing current price with future average.
|
Calculate price trend by comparing current price with future average.
|
||||||
|
|
||||||
|
Uses a 5-level trend scale with integer values for automation comparisons:
|
||||||
|
- strongly_falling (-2): difference <= strongly_falling_threshold
|
||||||
|
- falling (-1): difference <= falling_threshold
|
||||||
|
- stable (0): difference between thresholds
|
||||||
|
- rising (+1): difference >= rising_threshold
|
||||||
|
- strongly_rising (+2): difference >= strongly_rising_threshold
|
||||||
|
|
||||||
|
The strong thresholds are independently configurable (not derived from base
|
||||||
|
thresholds), allowing fine-grained control over trend sensitivity.
|
||||||
|
|
||||||
Supports volatility-adaptive thresholds: when enabled, the effective threshold
|
Supports volatility-adaptive thresholds: when enabled, the effective threshold
|
||||||
is adjusted based on price volatility in the lookahead period. This makes the
|
is adjusted based on price volatility in the lookahead period. This makes the
|
||||||
trend detection more sensitive during stable periods and less noisy during
|
trend detection more sensitive during stable periods and less noisy during
|
||||||
|
|
@ -625,6 +1170,8 @@ def calculate_price_trend( # noqa: PLR0913 - All parameters are necessary for v
|
||||||
future_average: Average price of future intervals
|
future_average: Average price of future intervals
|
||||||
threshold_rising: Base threshold for rising trend (%, positive, default 3%)
|
threshold_rising: Base threshold for rising trend (%, positive, default 3%)
|
||||||
threshold_falling: Base threshold for falling trend (%, negative, default -3%)
|
threshold_falling: Base threshold for falling trend (%, negative, default -3%)
|
||||||
|
threshold_strongly_rising: Threshold for strongly rising (%, positive, default 6%)
|
||||||
|
threshold_strongly_falling: Threshold for strongly falling (%, negative, default -6%)
|
||||||
volatility_adjustment: Enable volatility-adaptive thresholds (default True)
|
volatility_adjustment: Enable volatility-adaptive thresholds (default True)
|
||||||
lookahead_intervals: Number of intervals in trend period for volatility calc
|
lookahead_intervals: Number of intervals in trend period for volatility calc
|
||||||
all_intervals: Price intervals (today + tomorrow) for volatility calculation
|
all_intervals: Price intervals (today + tomorrow) for volatility calculation
|
||||||
|
|
@ -632,9 +1179,10 @@ def calculate_price_trend( # noqa: PLR0913 - All parameters are necessary for v
|
||||||
volatility_threshold_high: User-configured high volatility threshold (%)
|
volatility_threshold_high: User-configured high volatility threshold (%)
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Tuple of (trend_state, difference_percentage)
|
Tuple of (trend_state, difference_percentage, trend_value)
|
||||||
trend_state: "rising" | "falling" | "stable"
|
trend_state: PRICE_TREND_* constant (e.g., "strongly_rising")
|
||||||
difference_percentage: % change from current to future ((future - current) / current * 100)
|
difference_percentage: % change from current to future ((future - current) / current * 100)
|
||||||
|
trend_value: Integer value from -2 to +2 for automation comparisons
|
||||||
|
|
||||||
Note:
|
Note:
|
||||||
Volatility adjustment factor:
|
Volatility adjustment factor:
|
||||||
|
|
@ -645,12 +1193,13 @@ def calculate_price_trend( # noqa: PLR0913 - All parameters are necessary for v
|
||||||
"""
|
"""
|
||||||
if current_interval_price == 0:
|
if current_interval_price == 0:
|
||||||
# Avoid division by zero - return stable trend
|
# Avoid division by zero - return stable trend
|
||||||
return "stable", 0.0
|
return PRICE_TREND_STABLE, 0.0, PRICE_TREND_MAPPING[PRICE_TREND_STABLE]
|
||||||
|
|
||||||
# Apply volatility adjustment if enabled and data available
|
# Apply volatility adjustment if enabled and data available
|
||||||
effective_rising = threshold_rising
|
effective_rising = threshold_rising
|
||||||
effective_falling = threshold_falling
|
effective_falling = threshold_falling
|
||||||
volatility_factor = 1.0
|
effective_strongly_rising = threshold_strongly_rising
|
||||||
|
effective_strongly_falling = threshold_strongly_falling
|
||||||
|
|
||||||
if volatility_adjustment and lookahead_intervals and all_intervals:
|
if volatility_adjustment and lookahead_intervals and all_intervals:
|
||||||
volatility_factor = _calculate_lookahead_volatility_factor(
|
volatility_factor = _calculate_lookahead_volatility_factor(
|
||||||
|
|
@ -658,22 +1207,25 @@ def calculate_price_trend( # noqa: PLR0913 - All parameters are necessary for v
|
||||||
)
|
)
|
||||||
effective_rising = threshold_rising * volatility_factor
|
effective_rising = threshold_rising * volatility_factor
|
||||||
effective_falling = threshold_falling * volatility_factor
|
effective_falling = threshold_falling * volatility_factor
|
||||||
|
effective_strongly_rising = threshold_strongly_rising * volatility_factor
|
||||||
|
effective_strongly_falling = threshold_strongly_falling * volatility_factor
|
||||||
|
|
||||||
# Calculate percentage difference from current to future
|
# Calculate percentage difference from current to future
|
||||||
# CRITICAL: Use abs() for negative prices to get correct percentage direction
|
# CRITICAL: Use abs() for negative prices to get correct percentage direction
|
||||||
# Example: current=-10, future=-5 → diff=5, pct=5/abs(-10)*100=+50% (correctly shows rising)
|
# Example: current=-10, future=-5 → diff=5, pct=5/abs(-10)*100=+50% (correctly shows rising)
|
||||||
if current_interval_price == 0:
|
diff_pct = ((future_average - current_interval_price) / abs(current_interval_price)) * 100
|
||||||
# Edge case: avoid division by zero
|
|
||||||
diff_pct = 0.0
|
|
||||||
else:
|
|
||||||
diff_pct = ((future_average - current_interval_price) / abs(current_interval_price)) * 100
|
|
||||||
|
|
||||||
# Determine trend based on effective thresholds
|
# Determine trend based on effective thresholds (5-level scale)
|
||||||
if diff_pct >= effective_rising:
|
# Check "strongly" conditions first (more extreme), then regular conditions
|
||||||
trend = "rising"
|
if diff_pct >= effective_strongly_rising:
|
||||||
|
trend = PRICE_TREND_STRONGLY_RISING
|
||||||
|
elif diff_pct >= effective_rising:
|
||||||
|
trend = PRICE_TREND_RISING
|
||||||
|
elif diff_pct <= effective_strongly_falling:
|
||||||
|
trend = PRICE_TREND_STRONGLY_FALLING
|
||||||
elif diff_pct <= effective_falling:
|
elif diff_pct <= effective_falling:
|
||||||
trend = "falling"
|
trend = PRICE_TREND_FALLING
|
||||||
else:
|
else:
|
||||||
trend = "stable"
|
trend = PRICE_TREND_STABLE
|
||||||
|
|
||||||
return trend, diff_pct
|
return trend, diff_pct, PRICE_TREND_MAPPING[trend]
|
||||||
|
|
|
||||||
186
docs/developer/versioned_docs/version-v0.23.1/api-reference.md
Normal file
186
docs/developer/versioned_docs/version-v0.23.1/api-reference.md
Normal file
|
|
@ -0,0 +1,186 @@
|
||||||
|
---
|
||||||
|
comments: false
|
||||||
|
---
|
||||||
|
|
||||||
|
# API Reference
|
||||||
|
|
||||||
|
Documentation of the Tibber GraphQL API used by this integration.
|
||||||
|
|
||||||
|
## GraphQL Endpoint
|
||||||
|
|
||||||
|
```
|
||||||
|
https://api.tibber.com/v1-beta/gql
|
||||||
|
```
|
||||||
|
|
||||||
|
**Authentication:** Bearer token in `Authorization` header
|
||||||
|
|
||||||
|
## Queries Used
|
||||||
|
|
||||||
|
### User Data Query
|
||||||
|
|
||||||
|
Fetches home information and metadata:
|
||||||
|
|
||||||
|
```graphql
|
||||||
|
query {
|
||||||
|
viewer {
|
||||||
|
homes {
|
||||||
|
id
|
||||||
|
appNickname
|
||||||
|
address {
|
||||||
|
address1
|
||||||
|
postalCode
|
||||||
|
city
|
||||||
|
country
|
||||||
|
}
|
||||||
|
timeZone
|
||||||
|
currentSubscription {
|
||||||
|
priceInfo {
|
||||||
|
current {
|
||||||
|
currency
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
meteringPointData {
|
||||||
|
consumptionEan
|
||||||
|
gridAreaCode
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Cached for:** 24 hours
|
||||||
|
|
||||||
|
### Price Data Query
|
||||||
|
|
||||||
|
Fetches quarter-hourly prices:
|
||||||
|
|
||||||
|
```graphql
|
||||||
|
query($homeId: ID!) {
|
||||||
|
viewer {
|
||||||
|
home(id: $homeId) {
|
||||||
|
currentSubscription {
|
||||||
|
priceInfo {
|
||||||
|
range(resolution: QUARTER_HOURLY, first: 384) {
|
||||||
|
nodes {
|
||||||
|
total
|
||||||
|
startsAt
|
||||||
|
level
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
- `homeId`: Tibber home identifier
|
||||||
|
- `resolution`: Always `QUARTER_HOURLY`
|
||||||
|
- `first`: 384 intervals (4 days of data)
|
||||||
|
|
||||||
|
**Cached until:** Midnight local time
|
||||||
|
|
||||||
|
## Rate Limits
|
||||||
|
|
||||||
|
Tibber API rate limits (as of 2024):
|
||||||
|
- **5000 requests per hour** per token
|
||||||
|
- **Burst limit:** 100 requests per minute
|
||||||
|
|
||||||
|
Integration stays well below these limits:
|
||||||
|
- Polls every 15 minutes = 96 requests/day
|
||||||
|
- User data cached for 24h = 1 request/day
|
||||||
|
- **Total:** ~100 requests/day per home
|
||||||
|
|
||||||
|
## Response Format
|
||||||
|
|
||||||
|
### Price Node Structure
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"total": 0.2456,
|
||||||
|
"startsAt": "2024-12-06T14:00:00.000+01:00",
|
||||||
|
"level": "NORMAL"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Fields:**
|
||||||
|
- `total`: Price including VAT and fees (currency's major unit, e.g., EUR)
|
||||||
|
- `startsAt`: ISO 8601 timestamp with timezone
|
||||||
|
- `level`: Tibber's own classification (VERY_CHEAP, CHEAP, NORMAL, EXPENSIVE, VERY_EXPENSIVE)
|
||||||
|
|
||||||
|
### Currency Information
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"currency": "EUR"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Supported currencies:
|
||||||
|
- `EUR` (Euro) - displayed as ct/kWh
|
||||||
|
- `NOK` (Norwegian Krone) - displayed as øre/kWh
|
||||||
|
- `SEK` (Swedish Krona) - displayed as öre/kWh
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
### Common Error Responses
|
||||||
|
|
||||||
|
**Invalid Token:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"errors": [{
|
||||||
|
"message": "Unauthorized",
|
||||||
|
"extensions": {
|
||||||
|
"code": "UNAUTHENTICATED"
|
||||||
|
}
|
||||||
|
}]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Rate Limit Exceeded:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"errors": [{
|
||||||
|
"message": "Too Many Requests",
|
||||||
|
"extensions": {
|
||||||
|
"code": "RATE_LIMIT_EXCEEDED"
|
||||||
|
}
|
||||||
|
}]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Home Not Found:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"errors": [{
|
||||||
|
"message": "Home not found",
|
||||||
|
"extensions": {
|
||||||
|
"code": "NOT_FOUND"
|
||||||
|
}
|
||||||
|
}]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Integration handles these with:
|
||||||
|
- Exponential backoff retry (3 attempts)
|
||||||
|
- ConfigEntryAuthFailed for auth errors
|
||||||
|
- ConfigEntryNotReady for temporary failures
|
||||||
|
|
||||||
|
## Data Transformation
|
||||||
|
|
||||||
|
Raw API data is enriched with:
|
||||||
|
- **Trailing 24h average** - Calculated from previous intervals
|
||||||
|
- **Leading 24h average** - Calculated from future intervals
|
||||||
|
- **Price difference %** - Deviation from average
|
||||||
|
- **Custom rating** - Based on user thresholds (different from Tibber's `level`)
|
||||||
|
|
||||||
|
See `utils/price.py` for enrichment logic.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
💡 **External Resources:**
|
||||||
|
- [Tibber API Documentation](https://developer.tibber.com/docs/overview)
|
||||||
|
- [GraphQL Explorer](https://developer.tibber.com/explorer)
|
||||||
|
- [Get API Token](https://developer.tibber.com/settings/access-token)
|
||||||
358
docs/developer/versioned_docs/version-v0.23.1/architecture.md
Normal file
358
docs/developer/versioned_docs/version-v0.23.1/architecture.md
Normal file
|
|
@ -0,0 +1,358 @@
|
||||||
|
---
|
||||||
|
comments: false
|
||||||
|
---
|
||||||
|
|
||||||
|
# Architecture
|
||||||
|
|
||||||
|
This document provides a visual overview of the integration's architecture, focusing on end-to-end data flow and caching layers.
|
||||||
|
|
||||||
|
For detailed implementation patterns, see [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.23.1/AGENTS.md).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## End-to-End Data Flow
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart TB
|
||||||
|
%% External Systems
|
||||||
|
TIBBER[("🌐 Tibber GraphQL API<br/>api.tibber.com")]
|
||||||
|
HA[("🏠 Home Assistant<br/>Core")]
|
||||||
|
|
||||||
|
%% Entry Point
|
||||||
|
SETUP["__init__.py<br/>async_setup_entry()"]
|
||||||
|
|
||||||
|
%% Core Components
|
||||||
|
API["api.py<br/>TibberPricesApiClient<br/><br/>GraphQL queries"]
|
||||||
|
COORD["coordinator.py<br/>TibberPricesDataUpdateCoordinator<br/><br/>Orchestrates updates every 15min"]
|
||||||
|
|
||||||
|
%% Caching Layers
|
||||||
|
CACHE_API["💾 API Cache<br/>coordinator/cache.py<br/><br/>HA Storage (persistent)<br/>User: 24h | Prices: until midnight"]
|
||||||
|
CACHE_TRANS["💾 Transformation Cache<br/>coordinator/data_transformation.py<br/><br/>Memory (enriched prices)<br/>Until config change or midnight"]
|
||||||
|
CACHE_PERIOD["💾 Period Cache<br/>coordinator/periods.py<br/><br/>Memory (calculated periods)<br/>Hash-based invalidation"]
|
||||||
|
CACHE_CONFIG["💾 Config Cache<br/>coordinator/*<br/><br/>Memory (parsed options)<br/>Until config change"]
|
||||||
|
CACHE_TRANS_TEXT["💾 Translation Cache<br/>const.py<br/><br/>Memory (UI strings)<br/>Until HA restart"]
|
||||||
|
|
||||||
|
%% Processing Components
|
||||||
|
TRANSFORM["coordinator/data_transformation.py<br/>DataTransformer<br/><br/>Enrich prices with statistics"]
|
||||||
|
PERIODS["coordinator/periods.py<br/>PeriodCalculator<br/><br/>Calculate best/peak periods"]
|
||||||
|
ENRICH["price_utils.py + average_utils.py<br/><br/>Calculate trailing/leading averages<br/>rating_level, differences"]
|
||||||
|
|
||||||
|
%% Output Components
|
||||||
|
SENSORS["sensor/<br/>TibberPricesSensor<br/><br/>120+ price/level/rating sensors"]
|
||||||
|
BINARY["binary_sensor/<br/>TibberPricesBinarySensor<br/><br/>Period indicators"]
|
||||||
|
SERVICES["services/<br/><br/>Custom service endpoints<br/>(get_chartdata, ApexCharts)"]
|
||||||
|
|
||||||
|
%% Flow Connections
|
||||||
|
TIBBER -->|"Query user data<br/>Query prices<br/>(yesterday/today/tomorrow)"| API
|
||||||
|
|
||||||
|
API -->|"Raw GraphQL response"| COORD
|
||||||
|
|
||||||
|
COORD -->|"Check cache first"| CACHE_API
|
||||||
|
CACHE_API -.->|"Cache hit:<br/>Return cached"| COORD
|
||||||
|
CACHE_API -.->|"Cache miss:<br/>Fetch from API"| API
|
||||||
|
|
||||||
|
COORD -->|"Raw price data"| TRANSFORM
|
||||||
|
TRANSFORM -->|"Check cache"| CACHE_TRANS
|
||||||
|
CACHE_TRANS -.->|"Cache hit"| TRANSFORM
|
||||||
|
CACHE_TRANS -.->|"Cache miss"| ENRICH
|
||||||
|
ENRICH -->|"Enriched data"| TRANSFORM
|
||||||
|
|
||||||
|
TRANSFORM -->|"Enriched price data"| COORD
|
||||||
|
|
||||||
|
COORD -->|"Enriched data"| PERIODS
|
||||||
|
PERIODS -->|"Check cache"| CACHE_PERIOD
|
||||||
|
CACHE_PERIOD -.->|"Hash match:<br/>Return cached"| PERIODS
|
||||||
|
CACHE_PERIOD -.->|"Hash mismatch:<br/>Recalculate"| PERIODS
|
||||||
|
|
||||||
|
PERIODS -->|"Calculated periods"| COORD
|
||||||
|
|
||||||
|
COORD -->|"Complete data<br/>(prices + periods)"| SENSORS
|
||||||
|
COORD -->|"Complete data"| BINARY
|
||||||
|
COORD -->|"Data access"| SERVICES
|
||||||
|
|
||||||
|
SENSORS -->|"Entity states"| HA
|
||||||
|
BINARY -->|"Entity states"| HA
|
||||||
|
SERVICES -->|"Service responses"| HA
|
||||||
|
|
||||||
|
%% Config access
|
||||||
|
CACHE_CONFIG -.->|"Parsed options"| TRANSFORM
|
||||||
|
CACHE_CONFIG -.->|"Parsed options"| PERIODS
|
||||||
|
CACHE_TRANS_TEXT -.->|"UI strings"| SENSORS
|
||||||
|
CACHE_TRANS_TEXT -.->|"UI strings"| BINARY
|
||||||
|
|
||||||
|
SETUP -->|"Initialize"| COORD
|
||||||
|
SETUP -->|"Register"| SENSORS
|
||||||
|
SETUP -->|"Register"| BINARY
|
||||||
|
SETUP -->|"Register"| SERVICES
|
||||||
|
|
||||||
|
%% Styling
|
||||||
|
classDef external fill:#e1f5ff,stroke:#0288d1,stroke-width:3px
|
||||||
|
classDef cache fill:#fff3e0,stroke:#f57c00,stroke-width:2px
|
||||||
|
classDef processing fill:#f3e5f5,stroke:#7b1fa2,stroke-width:2px
|
||||||
|
classDef output fill:#e8f5e9,stroke:#388e3c,stroke-width:2px
|
||||||
|
|
||||||
|
class TIBBER,HA external
|
||||||
|
class CACHE_API,CACHE_TRANS,CACHE_PERIOD,CACHE_CONFIG,CACHE_TRANS_TEXT cache
|
||||||
|
class TRANSFORM,PERIODS,ENRICH processing
|
||||||
|
class SENSORS,BINARY,SERVICES output
|
||||||
|
```
|
||||||
|
|
||||||
|
### Flow Description
|
||||||
|
|
||||||
|
1. **Setup** (`__init__.py`)
|
||||||
|
- Integration loads, creates coordinator instance
|
||||||
|
- Registers entity platforms (sensor, binary_sensor)
|
||||||
|
- Sets up custom services
|
||||||
|
|
||||||
|
2. **Data Fetch** (every 15 minutes)
|
||||||
|
- Coordinator triggers update via `api.py`
|
||||||
|
- API client checks **persistent cache** first (`coordinator/cache.py`)
|
||||||
|
- If cache valid → return cached data
|
||||||
|
- If cache stale → query Tibber GraphQL API
|
||||||
|
- Store fresh data in persistent cache (survives HA restart)
|
||||||
|
|
||||||
|
3. **Price Enrichment**
|
||||||
|
- Coordinator passes raw prices to `DataTransformer`
|
||||||
|
- Transformer checks **transformation cache** (memory)
|
||||||
|
- If cache valid → return enriched data
|
||||||
|
- If cache invalid → enrich via `price_utils.py` + `average_utils.py`
|
||||||
|
- Calculate 24h trailing/leading averages
|
||||||
|
- Calculate price differences (% from average)
|
||||||
|
- Assign rating levels (LOW/NORMAL/HIGH)
|
||||||
|
- Store enriched data in transformation cache
|
||||||
|
|
||||||
|
4. **Period Calculation**
|
||||||
|
- Coordinator passes enriched data to `PeriodCalculator`
|
||||||
|
- Calculator computes **hash** from prices + config
|
||||||
|
- If hash matches cache → return cached periods
|
||||||
|
- If hash differs → recalculate best/peak price periods
|
||||||
|
- Store periods with new hash
|
||||||
|
|
||||||
|
5. **Entity Updates**
|
||||||
|
- Coordinator provides complete data (prices + periods)
|
||||||
|
- Sensors read values via unified handlers
|
||||||
|
- Binary sensors evaluate period states
|
||||||
|
- Entities update on quarter-hour boundaries (00/15/30/45)
|
||||||
|
|
||||||
|
6. **Service Calls**
|
||||||
|
- Custom services access coordinator data directly
|
||||||
|
- Return formatted responses (JSON, ApexCharts format)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Caching Architecture
|
||||||
|
|
||||||
|
### Overview
|
||||||
|
|
||||||
|
The integration uses **5 independent caching layers** for optimal performance:
|
||||||
|
|
||||||
|
| Layer | Location | Lifetime | Invalidation | Memory |
|
||||||
|
|-------|----------|----------|--------------|--------|
|
||||||
|
| **API Cache** | `coordinator/cache.py` | 24h (user)<br/>Until midnight (prices) | Automatic | 50KB |
|
||||||
|
| **Translation Cache** | `const.py` | Until HA restart | Never | 5KB |
|
||||||
|
| **Config Cache** | `coordinator/*` | Until config change | Explicit | 1KB |
|
||||||
|
| **Period Cache** | `coordinator/periods.py` | Until data/config change | Hash-based | 10KB |
|
||||||
|
| **Transformation Cache** | `coordinator/data_transformation.py` | Until midnight/config | Automatic | 60KB |
|
||||||
|
|
||||||
|
**Total cache overhead:** ~126KB per coordinator instance (main entry + subentries)
|
||||||
|
|
||||||
|
### Cache Coordination
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart LR
|
||||||
|
USER[("User changes options")]
|
||||||
|
MIDNIGHT[("Midnight turnover")]
|
||||||
|
NEWDATA[("Tomorrow data arrives")]
|
||||||
|
|
||||||
|
USER -->|"Explicit invalidation"| CONFIG["Config Cache<br/>❌ Clear"]
|
||||||
|
USER -->|"Explicit invalidation"| PERIOD["Period Cache<br/>❌ Clear"]
|
||||||
|
USER -->|"Explicit invalidation"| TRANS["Transformation Cache<br/>❌ Clear"]
|
||||||
|
|
||||||
|
MIDNIGHT -->|"Date validation"| API["API Cache<br/>❌ Clear prices"]
|
||||||
|
MIDNIGHT -->|"Date check"| TRANS
|
||||||
|
|
||||||
|
NEWDATA -->|"Hash mismatch"| PERIOD
|
||||||
|
|
||||||
|
CONFIG -.->|"Next access"| CONFIG_NEW["Reparse options"]
|
||||||
|
PERIOD -.->|"Next access"| PERIOD_NEW["Recalculate"]
|
||||||
|
TRANS -.->|"Next access"| TRANS_NEW["Re-enrich"]
|
||||||
|
API -.->|"Next access"| API_NEW["Fetch from API"]
|
||||||
|
|
||||||
|
classDef invalid fill:#ffebee,stroke:#c62828,stroke-width:2px
|
||||||
|
classDef rebuild fill:#e8f5e9,stroke:#388e3c,stroke-width:2px
|
||||||
|
|
||||||
|
class CONFIG,PERIOD,TRANS,API invalid
|
||||||
|
class CONFIG_NEW,PERIOD_NEW,TRANS_NEW,API_NEW rebuild
|
||||||
|
```
|
||||||
|
|
||||||
|
**Key insight:** No cascading invalidations - each cache is independent and rebuilds on-demand.
|
||||||
|
|
||||||
|
For detailed cache behavior, see [Caching Strategy](./caching-strategy.md).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Component Responsibilities
|
||||||
|
|
||||||
|
### Core Components
|
||||||
|
|
||||||
|
| Component | File | Responsibility |
|
||||||
|
|-----------|------|----------------|
|
||||||
|
| **API Client** | `api.py` | GraphQL queries to Tibber, retry logic, error handling |
|
||||||
|
| **Coordinator** | `coordinator.py` | Update orchestration, cache management, absolute-time scheduling with boundary tolerance |
|
||||||
|
| **Data Transformer** | `coordinator/data_transformation.py` | Price enrichment (averages, ratings, differences) |
|
||||||
|
| **Period Calculator** | `coordinator/periods.py` | Best/peak price period calculation with relaxation |
|
||||||
|
| **Sensors** | `sensor/` | 80+ entities for prices, levels, ratings, statistics |
|
||||||
|
| **Binary Sensors** | `binary_sensor/` | Period indicators (best/peak price active) |
|
||||||
|
| **Services** | `services/` | Custom service endpoints (get_chartdata, get_apexcharts_yaml, refresh_user_data) |
|
||||||
|
|
||||||
|
### Sensor Architecture (Calculator Pattern)
|
||||||
|
|
||||||
|
The sensor platform uses **Calculator Pattern** for clean separation of concerns (refactored Nov 2025):
|
||||||
|
|
||||||
|
| Component | Files | Lines | Responsibility |
|
||||||
|
|-----------|-------|-------|----------------|
|
||||||
|
| **Entity Class** | `sensor/core.py` | 909 | Entity lifecycle, coordinator, delegates to calculators |
|
||||||
|
| **Calculators** | `sensor/calculators/` | 1,838 | Business logic (8 specialized calculators) |
|
||||||
|
| **Attributes** | `sensor/attributes/` | 1,209 | State presentation (8 specialized modules) |
|
||||||
|
| **Routing** | `sensor/value_getters.py` | 276 | Centralized sensor → calculator mapping |
|
||||||
|
| **Chart Export** | `sensor/chart_data.py` | 144 | Service call handling, YAML parsing |
|
||||||
|
| **Helpers** | `sensor/helpers.py` | 188 | Aggregation functions, utilities |
|
||||||
|
|
||||||
|
**Calculator Package** (`sensor/calculators/`):
|
||||||
|
- `base.py` - Abstract BaseCalculator with coordinator access
|
||||||
|
- `interval.py` - Single interval calculations (current/next/previous)
|
||||||
|
- `rolling_hour.py` - 5-interval rolling windows
|
||||||
|
- `daily_stat.py` - Calendar day min/max/avg statistics
|
||||||
|
- `window_24h.py` - Trailing/leading 24h windows
|
||||||
|
- `volatility.py` - Price volatility analysis
|
||||||
|
- `trend.py` - Complex trend analysis with caching
|
||||||
|
- `timing.py` - Best/peak price period timing
|
||||||
|
- `metadata.py` - Home/metering metadata
|
||||||
|
|
||||||
|
**Benefits:**
|
||||||
|
- 58% reduction in core.py (2,170 → 909 lines)
|
||||||
|
- Clear separation: Calculators (logic) vs Attributes (presentation)
|
||||||
|
- Independent testability for each calculator
|
||||||
|
- Easy to add sensors: Choose calculation pattern, add to routing
|
||||||
|
|
||||||
|
### Helper Utilities
|
||||||
|
|
||||||
|
| Utility | File | Purpose |
|
||||||
|
|---------|------|---------|
|
||||||
|
| **Price Utils** | `utils/price.py` | Rating calculation, enrichment, level aggregation |
|
||||||
|
| **Average Utils** | `utils/average.py` | Trailing/leading 24h average calculations |
|
||||||
|
| **Entity Utils** | `entity_utils/` | Shared icon/color/attribute logic |
|
||||||
|
| **Translations** | `const.py` | Translation loading and caching |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Key Patterns
|
||||||
|
|
||||||
|
### 1. Dual Translation System
|
||||||
|
|
||||||
|
- **Standard translations** (`/translations/*.json`): HA-compliant schema for entity names
|
||||||
|
- **Custom translations** (`/custom_translations/*.json`): Extended descriptions, usage tips
|
||||||
|
- Both loaded at integration setup, cached in memory
|
||||||
|
- Access via `get_translation()` helper function
|
||||||
|
|
||||||
|
### 2. Price Data Enrichment
|
||||||
|
|
||||||
|
All quarter-hourly price intervals get augmented via `utils/price.py`:
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Original from Tibber API
|
||||||
|
{
|
||||||
|
"startsAt": "2025-11-03T14:00:00+01:00",
|
||||||
|
"total": 0.2534,
|
||||||
|
"level": "NORMAL"
|
||||||
|
}
|
||||||
|
|
||||||
|
# After enrichment (utils/price.py)
|
||||||
|
{
|
||||||
|
"startsAt": "2025-11-03T14:00:00+01:00",
|
||||||
|
"total": 0.2534,
|
||||||
|
"level": "NORMAL",
|
||||||
|
"trailing_avg_24h": 0.2312, # ← Added: 24h trailing average
|
||||||
|
"difference": 9.6, # ← Added: % diff from trailing avg
|
||||||
|
"rating_level": "NORMAL" # ← Added: LOW/NORMAL/HIGH based on thresholds
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Quarter-Hour Precision
|
||||||
|
|
||||||
|
- **API polling**: Every 15 minutes (coordinator fetch cycle)
|
||||||
|
- **Entity updates**: On 00/15/30/45-minute boundaries via `coordinator/listeners.py`
|
||||||
|
- **Timer scheduling**: Uses `async_track_utc_time_change(minute=[0, 15, 30, 45], second=0)`
|
||||||
|
- HA may trigger ±few milliseconds before/after exact boundary
|
||||||
|
- Smart boundary tolerance (±2 seconds) handles scheduling jitter in `sensor/helpers.py`
|
||||||
|
- If HA schedules at 14:59:58 → rounds to 15:00:00 (shows new interval data)
|
||||||
|
- If HA restarts at 14:59:30 → stays at 14:45:00 (shows current interval data)
|
||||||
|
- **Absolute time tracking**: Timer plans for **all future boundaries** (not relative delays)
|
||||||
|
- Prevents double-updates (if triggered at 14:59:58, next trigger is 15:15:00, not 15:00:00)
|
||||||
|
- **Result**: Current price sensors update without waiting for next API poll
|
||||||
|
|
||||||
|
### 4. Calculator Pattern (Sensor Platform)
|
||||||
|
|
||||||
|
Sensors organized by **calculation method** (refactored Nov 2025):
|
||||||
|
|
||||||
|
**Unified Handler Methods** (`sensor/core.py`):
|
||||||
|
- `_get_interval_value(offset, type)` - current/next/previous intervals
|
||||||
|
- `_get_rolling_hour_value(offset, type)` - 5-interval rolling windows
|
||||||
|
- `_get_daily_stat_value(day, stat_func)` - calendar day min/max/avg
|
||||||
|
- `_get_24h_window_value(stat_func)` - trailing/leading statistics
|
||||||
|
|
||||||
|
**Routing** (`sensor/value_getters.py`):
|
||||||
|
- Single source of truth mapping 80+ entity keys to calculator methods
|
||||||
|
- Organized by calculation type (Interval, Rolling Hour, Daily Stats, etc.)
|
||||||
|
|
||||||
|
**Calculators** (`sensor/calculators/`):
|
||||||
|
- Each calculator inherits from `BaseCalculator` with coordinator access
|
||||||
|
- Focused responsibility: `IntervalCalculator`, `TrendCalculator`, etc.
|
||||||
|
- Complex logic isolated (e.g., `TrendCalculator` has internal caching)
|
||||||
|
|
||||||
|
**Attributes** (`sensor/attributes/`):
|
||||||
|
- Separate from business logic, handles state presentation
|
||||||
|
- Builds extra_state_attributes dicts for entity classes
|
||||||
|
- Unified builders: `build_sensor_attributes()`, `build_extra_state_attributes()`
|
||||||
|
|
||||||
|
**Benefits:**
|
||||||
|
- Minimal code duplication across 80+ sensors
|
||||||
|
- Clear separation of concerns (calculation vs presentation)
|
||||||
|
- Easy to extend: Add sensor → choose pattern → add to routing
|
||||||
|
- Independent testability for each component
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Performance Characteristics
|
||||||
|
|
||||||
|
### API Call Reduction
|
||||||
|
|
||||||
|
- **Without caching:** 96 API calls/day (every 15 min)
|
||||||
|
- **With caching:** ~1-2 API calls/day (only when cache expires)
|
||||||
|
- **Reduction:** ~98%
|
||||||
|
|
||||||
|
### CPU Optimization
|
||||||
|
|
||||||
|
| Optimization | Location | Savings |
|
||||||
|
|--------------|----------|---------|
|
||||||
|
| Config caching | `coordinator/*` | ~50% on config checks |
|
||||||
|
| Period caching | `coordinator/periods.py` | ~70% on period recalculation |
|
||||||
|
| Lazy logging | Throughout | ~15% on log-heavy operations |
|
||||||
|
| Import optimization | Module structure | ~20% faster loading |
|
||||||
|
|
||||||
|
### Memory Usage
|
||||||
|
|
||||||
|
- **Per coordinator instance:** ~126KB cache overhead
|
||||||
|
- **Typical setup:** 1 main + 2 subentries = ~378KB total
|
||||||
|
- **Redundancy eliminated:** 14% reduction (10KB saved per coordinator)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Related Documentation
|
||||||
|
|
||||||
|
- **[Timer Architecture](./timer-architecture.md)** - Timer system, scheduling, coordination (3 independent timers)
|
||||||
|
- **[Caching Strategy](./caching-strategy.md)** - Detailed cache behavior, invalidation, debugging
|
||||||
|
- **[Setup Guide](./setup.md)** - Development environment setup
|
||||||
|
- **[Testing Guide](./testing.md)** - How to test changes
|
||||||
|
- **[Release Management](./release-management.md)** - Release workflow and versioning
|
||||||
|
- **[AGENTS.md](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.23.1/AGENTS.md)** - Complete reference for AI development
|
||||||
|
|
@ -0,0 +1,447 @@
|
||||||
|
---
|
||||||
|
comments: false
|
||||||
|
---
|
||||||
|
|
||||||
|
# Caching Strategy
|
||||||
|
|
||||||
|
This document explains all caching mechanisms in the Tibber Prices integration, their purpose, invalidation logic, and lifetime.
|
||||||
|
|
||||||
|
For timer coordination and scheduling details, see [Timer Architecture](./timer-architecture.md).
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The integration uses **4 distinct caching layers** with different purposes and lifetimes:
|
||||||
|
|
||||||
|
1. **Persistent API Data Cache** (HA Storage) - Hours to days
|
||||||
|
2. **Translation Cache** (Memory) - Forever (until HA restart)
|
||||||
|
3. **Config Dictionary Cache** (Memory) - Until config changes
|
||||||
|
4. **Period Calculation Cache** (Memory) - Until price data or config changes
|
||||||
|
|
||||||
|
## 1. Persistent API Data Cache
|
||||||
|
|
||||||
|
**Location:** `coordinator/cache.py` → HA Storage (`.storage/tibber_prices.<entry_id>`)
|
||||||
|
|
||||||
|
**Purpose:** Reduce API calls to Tibber by caching user data and price data between HA restarts.
|
||||||
|
|
||||||
|
**What is cached:**
|
||||||
|
- **Price data** (`price_data`): Day before yesterday/yesterday/today/tomorrow price intervals with enriched fields (384 intervals total)
|
||||||
|
- **User data** (`user_data`): Homes, subscriptions, features from Tibber GraphQL `viewer` query
|
||||||
|
- **Timestamps**: Last update times for validation
|
||||||
|
|
||||||
|
**Lifetime:**
|
||||||
|
- **Price data**: Until midnight turnover (cleared daily at 00:00 local time)
|
||||||
|
- **User data**: 24 hours (refreshed daily)
|
||||||
|
- **Survives**: HA restarts via persistent Storage
|
||||||
|
|
||||||
|
**Invalidation triggers:**
|
||||||
|
|
||||||
|
1. **Midnight turnover** (Timer #2 in coordinator):
|
||||||
|
```python
|
||||||
|
# coordinator/day_transitions.py
|
||||||
|
def _handle_midnight_turnover() -> None:
|
||||||
|
self._cached_price_data = None # Force fresh fetch for new day
|
||||||
|
self._last_price_update = None
|
||||||
|
await self.store_cache()
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Cache validation on load**:
|
||||||
|
```python
|
||||||
|
# coordinator/cache.py
|
||||||
|
def is_cache_valid(cache_data: CacheData) -> bool:
|
||||||
|
# Checks if price data is from a previous day
|
||||||
|
if today_date < local_now.date(): # Yesterday's data
|
||||||
|
return False
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Tomorrow data check** (after 13:00):
|
||||||
|
```python
|
||||||
|
# coordinator/data_fetching.py
|
||||||
|
if tomorrow_missing or tomorrow_invalid:
|
||||||
|
return "tomorrow_check" # Update needed
|
||||||
|
```
|
||||||
|
|
||||||
|
**Why this cache matters:** Reduces API load on Tibber (~192 intervals per fetch), speeds up HA restarts, enables offline operation until cache expires.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2. Translation Cache
|
||||||
|
|
||||||
|
**Location:** `const.py` → `_TRANSLATIONS_CACHE` and `_STANDARD_TRANSLATIONS_CACHE` (in-memory dicts)
|
||||||
|
|
||||||
|
**Purpose:** Avoid repeated file I/O when accessing entity descriptions, UI strings, etc.
|
||||||
|
|
||||||
|
**What is cached:**
|
||||||
|
- **Standard translations** (`/translations/*.json`): Config flow, selector options, entity names
|
||||||
|
- **Custom translations** (`/custom_translations/*.json`): Entity descriptions, usage tips, long descriptions
|
||||||
|
|
||||||
|
**Lifetime:**
|
||||||
|
- **Forever** (until HA restart)
|
||||||
|
- No invalidation during runtime
|
||||||
|
|
||||||
|
**When populated:**
|
||||||
|
- At integration setup: `async_load_translations(hass, "en")` in `__init__.py`
|
||||||
|
- Lazy loading: If translation missing, attempts file load once
|
||||||
|
|
||||||
|
**Access pattern:**
|
||||||
|
```python
|
||||||
|
# Non-blocking synchronous access from cached data
|
||||||
|
description = get_translation("binary_sensor.best_price_period.description", "en")
|
||||||
|
```
|
||||||
|
|
||||||
|
**Why this cache matters:** Entity attributes are accessed on every state update (~15 times per hour per entity). File I/O would block the event loop. Cache enables synchronous, non-blocking attribute generation.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3. Config Dictionary Cache
|
||||||
|
|
||||||
|
**Location:** `coordinator/data_transformation.py` and `coordinator/periods.py` (per-instance fields)
|
||||||
|
|
||||||
|
**Purpose:** Avoid ~30-40 `options.get()` calls on every coordinator update (every 15 minutes).
|
||||||
|
|
||||||
|
**What is cached:**
|
||||||
|
|
||||||
|
### DataTransformer Config Cache
|
||||||
|
```python
|
||||||
|
{
|
||||||
|
"thresholds": {"low": 15, "high": 35},
|
||||||
|
"volatility_thresholds": {"moderate": 15.0, "high": 25.0, "very_high": 40.0},
|
||||||
|
# ... 20+ more config fields
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### PeriodCalculator Config Cache
|
||||||
|
```python
|
||||||
|
{
|
||||||
|
"best": {"flex": 0.15, "min_distance_from_avg": 5.0, "min_period_length": 60},
|
||||||
|
"peak": {"flex": 0.15, "min_distance_from_avg": 5.0, "min_period_length": 60}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Lifetime:**
|
||||||
|
- Until `invalidate_config_cache()` is called
|
||||||
|
- Built once on first use per coordinator update cycle
|
||||||
|
|
||||||
|
**Invalidation trigger:**
|
||||||
|
- **Options change** (user reconfigures integration):
|
||||||
|
```python
|
||||||
|
# coordinator/core.py
|
||||||
|
async def _handle_options_update(...) -> None:
|
||||||
|
self._data_transformer.invalidate_config_cache()
|
||||||
|
self._period_calculator.invalidate_config_cache()
|
||||||
|
await self.async_request_refresh()
|
||||||
|
```
|
||||||
|
|
||||||
|
**Performance impact:**
|
||||||
|
- **Before:** ~30 dict lookups + type conversions per update = ~50μs
|
||||||
|
- **After:** 1 cache check = ~1μs
|
||||||
|
- **Savings:** ~98% (50μs → 1μs per update)
|
||||||
|
|
||||||
|
**Why this cache matters:** Config is read multiple times per update (transformation + period calculation + validation). Caching eliminates redundant lookups without changing behavior.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4. Period Calculation Cache
|
||||||
|
|
||||||
|
**Location:** `coordinator/periods.py` → `PeriodCalculator._cached_periods`
|
||||||
|
|
||||||
|
**Purpose:** Avoid expensive period calculations (~100-500ms) when price data and config haven't changed.
|
||||||
|
|
||||||
|
**What is cached:**
|
||||||
|
```python
|
||||||
|
{
|
||||||
|
"best_price": {
|
||||||
|
"periods": [...], # Calculated period objects
|
||||||
|
"intervals": [...], # All intervals in periods
|
||||||
|
"metadata": {...} # Config snapshot
|
||||||
|
},
|
||||||
|
"best_price_relaxation": {"relaxation_active": bool, ...},
|
||||||
|
"peak_price": {...},
|
||||||
|
"peak_price_relaxation": {...}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Cache key:** Hash of relevant inputs
|
||||||
|
```python
|
||||||
|
hash_data = (
|
||||||
|
today_signature, # (startsAt, rating_level) for each interval
|
||||||
|
tuple(best_config.items()), # Best price config
|
||||||
|
tuple(peak_config.items()), # Peak price config
|
||||||
|
best_level_filter, # Level filter overrides
|
||||||
|
peak_level_filter
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Lifetime:**
|
||||||
|
- Until price data changes (today's intervals modified)
|
||||||
|
- Until config changes (flex, thresholds, filters)
|
||||||
|
- Recalculated at midnight (new today data)
|
||||||
|
|
||||||
|
**Invalidation triggers:**
|
||||||
|
|
||||||
|
1. **Config change** (explicit):
|
||||||
|
```python
|
||||||
|
def invalidate_config_cache() -> None:
|
||||||
|
self._cached_periods = None
|
||||||
|
self._last_periods_hash = None
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Price data change** (automatic via hash mismatch):
|
||||||
|
```python
|
||||||
|
current_hash = self._compute_periods_hash(price_info)
|
||||||
|
if self._last_periods_hash != current_hash:
|
||||||
|
# Cache miss - recalculate
|
||||||
|
```
|
||||||
|
|
||||||
|
**Cache hit rate:**
|
||||||
|
- **High:** During normal operation (coordinator updates every 15min, price data unchanged)
|
||||||
|
- **Low:** After midnight (new today data) or when tomorrow data arrives (~13:00-14:00)
|
||||||
|
|
||||||
|
**Performance impact:**
|
||||||
|
- **Period calculation:** ~100-500ms (depends on interval count, relaxation attempts)
|
||||||
|
- **Cache hit:** `<`1ms (hash comparison + dict lookup)
|
||||||
|
- **Savings:** ~70% of calculation time (most updates hit cache)
|
||||||
|
|
||||||
|
**Why this cache matters:** Period calculation is CPU-intensive (filtering, gap tolerance, relaxation). Caching avoids recalculating unchanged periods 3-4 times per hour.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5. Transformation Cache (Price Enrichment Only)
|
||||||
|
|
||||||
|
**Location:** `coordinator/data_transformation.py` → `_cached_transformed_data`
|
||||||
|
|
||||||
|
**Status:** ✅ **Clean separation** - enrichment only, no redundancy
|
||||||
|
|
||||||
|
**What is cached:**
|
||||||
|
```python
|
||||||
|
{
|
||||||
|
"timestamp": ...,
|
||||||
|
"homes": {...},
|
||||||
|
"priceInfo": {...}, # Enriched price data (trailing_avg_24h, difference, rating_level)
|
||||||
|
# NO periods - periods are exclusively managed by PeriodCalculator
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Purpose:** Avoid re-enriching price data when config unchanged between midnight checks.
|
||||||
|
|
||||||
|
**Current behavior:**
|
||||||
|
- Caches **only enriched price data** (price + statistics)
|
||||||
|
- **Does NOT cache periods** (handled by Period Calculation Cache)
|
||||||
|
- Invalidated when:
|
||||||
|
- Config changes (thresholds affect enrichment)
|
||||||
|
- Midnight turnover detected
|
||||||
|
- New update cycle begins
|
||||||
|
|
||||||
|
**Architecture:**
|
||||||
|
- DataTransformer: Handles price enrichment only
|
||||||
|
- PeriodCalculator: Handles period calculation only (with hash-based cache)
|
||||||
|
- Coordinator: Assembles final data on-demand from both caches
|
||||||
|
|
||||||
|
**Memory savings:** Eliminating redundant period storage saves ~10KB per coordinator (14% reduction).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Cache Invalidation Flow
|
||||||
|
|
||||||
|
### User Changes Options (Config Flow)
|
||||||
|
```
|
||||||
|
User saves options
|
||||||
|
↓
|
||||||
|
config_entry.add_update_listener() triggers
|
||||||
|
↓
|
||||||
|
coordinator._handle_options_update()
|
||||||
|
↓
|
||||||
|
├─> DataTransformer.invalidate_config_cache()
|
||||||
|
│ └─> _config_cache = None
|
||||||
|
│ _config_cache_valid = False
|
||||||
|
│ _cached_transformed_data = None
|
||||||
|
│
|
||||||
|
└─> PeriodCalculator.invalidate_config_cache()
|
||||||
|
└─> _config_cache = None
|
||||||
|
_config_cache_valid = False
|
||||||
|
_cached_periods = None
|
||||||
|
_last_periods_hash = None
|
||||||
|
↓
|
||||||
|
coordinator.async_request_refresh()
|
||||||
|
↓
|
||||||
|
Fresh data fetch with new config
|
||||||
|
```
|
||||||
|
|
||||||
|
### Midnight Turnover (Day Transition)
|
||||||
|
```
|
||||||
|
Timer #2 fires at 00:00
|
||||||
|
↓
|
||||||
|
coordinator._handle_midnight_turnover()
|
||||||
|
↓
|
||||||
|
├─> Clear persistent cache
|
||||||
|
│ └─> _cached_price_data = None
|
||||||
|
│ _last_price_update = None
|
||||||
|
│
|
||||||
|
└─> Clear transformation cache
|
||||||
|
└─> _cached_transformed_data = None
|
||||||
|
_last_transformation_config = None
|
||||||
|
↓
|
||||||
|
Period cache auto-invalidates (hash mismatch on new "today")
|
||||||
|
↓
|
||||||
|
Fresh API fetch for new day
|
||||||
|
```
|
||||||
|
|
||||||
|
### Tomorrow Data Arrives (~13:00)
|
||||||
|
```
|
||||||
|
Coordinator update cycle
|
||||||
|
↓
|
||||||
|
should_update_price_data() checks tomorrow
|
||||||
|
↓
|
||||||
|
Tomorrow data missing/invalid
|
||||||
|
↓
|
||||||
|
API fetch with new tomorrow data
|
||||||
|
↓
|
||||||
|
Price data hash changes (new intervals)
|
||||||
|
↓
|
||||||
|
Period cache auto-invalidates (hash mismatch)
|
||||||
|
↓
|
||||||
|
Periods recalculated with tomorrow included
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Cache Coordination
|
||||||
|
|
||||||
|
**All caches work together:**
|
||||||
|
|
||||||
|
```
|
||||||
|
Persistent Storage (HA restart)
|
||||||
|
↓
|
||||||
|
API Data Cache (price_data, user_data)
|
||||||
|
↓
|
||||||
|
├─> Enrichment (add rating_level, difference, etc.)
|
||||||
|
│ ↓
|
||||||
|
│ Transformation Cache (_cached_transformed_data)
|
||||||
|
│
|
||||||
|
└─> Period Calculation
|
||||||
|
↓
|
||||||
|
Period Cache (_cached_periods)
|
||||||
|
↓
|
||||||
|
Config Cache (avoid re-reading options)
|
||||||
|
↓
|
||||||
|
Translation Cache (entity descriptions)
|
||||||
|
```
|
||||||
|
|
||||||
|
**No cache invalidation cascades:**
|
||||||
|
- Config cache invalidation is **explicit** (on options update)
|
||||||
|
- Period cache invalidation is **automatic** (via hash mismatch)
|
||||||
|
- Transformation cache invalidation is **automatic** (on midnight/config change)
|
||||||
|
- Translation cache is **never invalidated** (read-only after load)
|
||||||
|
|
||||||
|
**Thread safety:**
|
||||||
|
- All caches are accessed from `MainThread` only (Home Assistant event loop)
|
||||||
|
- No locking needed (single-threaded execution model)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Performance Characteristics
|
||||||
|
|
||||||
|
### Typical Operation (No Changes)
|
||||||
|
```
|
||||||
|
Coordinator Update (every 15 min)
|
||||||
|
├─> API fetch: SKIP (cache valid)
|
||||||
|
├─> Config dict build: ~1μs (cached)
|
||||||
|
├─> Period calculation: ~1ms (cached, hash match)
|
||||||
|
├─> Transformation: ~10ms (enrichment only, periods cached)
|
||||||
|
└─> Entity updates: ~5ms (translation cache hit)
|
||||||
|
|
||||||
|
Total: ~16ms (down from ~600ms without caching)
|
||||||
|
```
|
||||||
|
|
||||||
|
### After Midnight Turnover
|
||||||
|
```
|
||||||
|
Coordinator Update (00:00)
|
||||||
|
├─> API fetch: ~500ms (cache cleared, fetch new day)
|
||||||
|
├─> Config dict build: ~50μs (rebuild, no cache)
|
||||||
|
├─> Period calculation: ~200ms (cache miss, recalculate)
|
||||||
|
├─> Transformation: ~50ms (re-enrich, rebuild)
|
||||||
|
└─> Entity updates: ~5ms (translation cache still valid)
|
||||||
|
|
||||||
|
Total: ~755ms (expected once per day)
|
||||||
|
```
|
||||||
|
|
||||||
|
### After Config Change
|
||||||
|
```
|
||||||
|
Options Update
|
||||||
|
├─> Cache invalidation: `<`1ms
|
||||||
|
├─> Coordinator refresh: ~600ms
|
||||||
|
│ ├─> API fetch: SKIP (data unchanged)
|
||||||
|
│ ├─> Config rebuild: ~50μs
|
||||||
|
│ ├─> Period recalculation: ~200ms (new thresholds)
|
||||||
|
│ ├─> Re-enrichment: ~50ms
|
||||||
|
│ └─> Entity updates: ~5ms
|
||||||
|
└─> Total: ~600ms (expected on manual reconfiguration)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Summary Table
|
||||||
|
|
||||||
|
| Cache Type | Lifetime | Size | Invalidation | Purpose |
|
||||||
|
|------------|----------|------|--------------|---------|
|
||||||
|
| **API Data** | Hours to 1 day | ~50KB | Midnight, validation | Reduce API calls |
|
||||||
|
| **Translations** | Forever (until HA restart) | ~5KB | Never | Avoid file I/O |
|
||||||
|
| **Config Dicts** | Until options change | `<`1KB | Explicit (options update) | Avoid dict lookups |
|
||||||
|
| **Period Calculation** | Until data/config change | ~10KB | Auto (hash mismatch) | Avoid CPU-intensive calculation |
|
||||||
|
| **Transformation** | Until midnight/config change | ~50KB | Auto (midnight/config) | Avoid re-enrichment |
|
||||||
|
|
||||||
|
**Total memory overhead:** ~116KB per coordinator instance (main + subentries)
|
||||||
|
|
||||||
|
**Benefits:**
|
||||||
|
- 97% reduction in API calls (from every 15min to once per day)
|
||||||
|
- 70% reduction in period calculation time (cache hits during normal operation)
|
||||||
|
- 98% reduction in config access time (30+ lookups → 1 cache check)
|
||||||
|
- Zero file I/O during runtime (translations cached at startup)
|
||||||
|
|
||||||
|
**Trade-offs:**
|
||||||
|
- Memory usage: ~116KB per home (negligible for modern systems)
|
||||||
|
- Code complexity: 5 cache invalidation points (well-tested, documented)
|
||||||
|
- Debugging: Must understand cache lifetime when investigating stale data issues
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Debugging Cache Issues
|
||||||
|
|
||||||
|
### Symptom: Stale data after config change
|
||||||
|
**Check:**
|
||||||
|
1. Is `_handle_options_update()` called? (should see "Options updated" log)
|
||||||
|
2. Are `invalidate_config_cache()` methods executed?
|
||||||
|
3. Does `async_request_refresh()` trigger?
|
||||||
|
|
||||||
|
**Fix:** Ensure `config_entry.add_update_listener()` is registered in coordinator init.
|
||||||
|
|
||||||
|
### Symptom: Period calculation not updating
|
||||||
|
**Check:**
|
||||||
|
1. Verify hash changes when data changes: `_compute_periods_hash()`
|
||||||
|
2. Check `_last_periods_hash` vs `current_hash`
|
||||||
|
3. Look for "Using cached period calculation" vs "Calculating periods" logs
|
||||||
|
|
||||||
|
**Fix:** Hash function may not include all relevant data. Review `_compute_periods_hash()` inputs.
|
||||||
|
|
||||||
|
### Symptom: Yesterday's prices shown as today
|
||||||
|
**Check:**
|
||||||
|
1. `is_cache_valid()` logic in `coordinator/cache.py`
|
||||||
|
2. Midnight turnover execution (Timer #2)
|
||||||
|
3. Cache clear confirmation in logs
|
||||||
|
|
||||||
|
**Fix:** Timer may not be firing. Check `_schedule_midnight_turnover()` registration.
|
||||||
|
|
||||||
|
### Symptom: Missing translations
|
||||||
|
**Check:**
|
||||||
|
1. `async_load_translations()` called at startup?
|
||||||
|
2. Translation files exist in `/translations/` and `/custom_translations/`?
|
||||||
|
3. Cache population: `_TRANSLATIONS_CACHE` keys
|
||||||
|
|
||||||
|
**Fix:** Re-install integration or restart HA to reload translation files.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Related Documentation
|
||||||
|
|
||||||
|
- **[Timer Architecture](./timer-architecture.md)** - Timer system, scheduling, midnight coordination
|
||||||
|
- **[Architecture](./architecture.md)** - Overall system design, data flow
|
||||||
|
- **[AGENTS.md](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.23.1/AGENTS.md)** - Complete reference for AI development
|
||||||
|
|
@ -0,0 +1,121 @@
|
||||||
|
---
|
||||||
|
comments: false
|
||||||
|
---
|
||||||
|
|
||||||
|
# Coding Guidelines
|
||||||
|
|
||||||
|
> **Note:** For complete coding standards, see [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.23.1/AGENTS.md).
|
||||||
|
|
||||||
|
## Code Style
|
||||||
|
|
||||||
|
- **Formatter/Linter**: Ruff (replaces Black, Flake8, isort)
|
||||||
|
- **Max line length**: 120 characters
|
||||||
|
- **Max complexity**: 25 (McCabe)
|
||||||
|
- **Target**: Python 3.13
|
||||||
|
|
||||||
|
Run before committing:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
./scripts/lint # Auto-fix issues
|
||||||
|
./scripts/release/hassfest # Validate integration structure
|
||||||
|
```
|
||||||
|
|
||||||
|
## Naming Conventions
|
||||||
|
|
||||||
|
### Class Names
|
||||||
|
|
||||||
|
**All public classes MUST use the integration name as prefix.**
|
||||||
|
|
||||||
|
This is a Home Assistant standard to avoid naming conflicts between integrations.
|
||||||
|
|
||||||
|
```python
|
||||||
|
# ✅ CORRECT
|
||||||
|
class TibberPricesApiClient:
|
||||||
|
class TibberPricesDataUpdateCoordinator:
|
||||||
|
class TibberPricesSensor:
|
||||||
|
|
||||||
|
# ❌ WRONG - Missing prefix
|
||||||
|
class ApiClient:
|
||||||
|
class DataFetcher:
|
||||||
|
class TimeService:
|
||||||
|
```
|
||||||
|
|
||||||
|
**When prefix is required:**
|
||||||
|
- Public classes used across multiple modules
|
||||||
|
- All exception classes
|
||||||
|
- All coordinator and entity classes
|
||||||
|
- Data classes (dataclasses, NamedTuples) used as public APIs
|
||||||
|
|
||||||
|
**When prefix can be omitted:**
|
||||||
|
- Private helper classes within a single module (prefix with `_` underscore)
|
||||||
|
- Type aliases and callbacks (e.g., `TimeServiceCallback`)
|
||||||
|
- Small internal NamedTuples for function returns
|
||||||
|
|
||||||
|
**Private Classes:**
|
||||||
|
|
||||||
|
If a helper class is ONLY used within a single module file, prefix it with an underscore:
|
||||||
|
|
||||||
|
```python
|
||||||
|
# ✅ Private class - used only in this file
|
||||||
|
class _InternalHelper:
|
||||||
|
"""Helper used only within this module."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
# ❌ Wrong - no prefix but used across modules
|
||||||
|
class DataFetcher: # Should be TibberPricesDataFetcher
|
||||||
|
pass
|
||||||
|
```
|
||||||
|
|
||||||
|
**Note:** Currently (Nov 2025), this project has **NO private classes** - all classes are used across module boundaries.
|
||||||
|
|
||||||
|
**Current Technical Debt:**
|
||||||
|
|
||||||
|
Many existing classes lack the `TibberPrices` prefix. Before refactoring:
|
||||||
|
1. Document the plan in `/planning/class-naming-refactoring.md`
|
||||||
|
2. Use `multi_replace_string_in_file` for bulk renames
|
||||||
|
3. Test thoroughly after each module
|
||||||
|
|
||||||
|
See [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.23.1/AGENTS.md) for complete list of classes needing rename.
|
||||||
|
|
||||||
|
## Import Order
|
||||||
|
|
||||||
|
1. Python stdlib (specific types only)
|
||||||
|
2. Third-party (`homeassistant.*`, `aiohttp`)
|
||||||
|
3. Local (`.api`, `.const`)
|
||||||
|
|
||||||
|
## Critical Patterns
|
||||||
|
|
||||||
|
### Time Handling
|
||||||
|
|
||||||
|
Always use `dt_util` from `homeassistant.util`:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from homeassistant.util import dt as dt_util
|
||||||
|
|
||||||
|
price_time = dt_util.parse_datetime(starts_at)
|
||||||
|
price_time = dt_util.as_local(price_time) # Convert to HA timezone
|
||||||
|
now = dt_util.now()
|
||||||
|
```
|
||||||
|
|
||||||
|
### Translation Loading
|
||||||
|
|
||||||
|
```python
|
||||||
|
# In __init__.py async_setup_entry:
|
||||||
|
await async_load_translations(hass, "en")
|
||||||
|
await async_load_standard_translations(hass, "en")
|
||||||
|
```
|
||||||
|
|
||||||
|
### Price Data Enrichment
|
||||||
|
|
||||||
|
Always enrich raw API data:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from .price_utils import enrich_price_info_with_differences
|
||||||
|
|
||||||
|
enriched = enrich_price_info_with_differences(
|
||||||
|
price_info_data,
|
||||||
|
thresholds,
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
See [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.23.1/AGENTS.md) for complete guidelines.
|
||||||
216
docs/developer/versioned_docs/version-v0.23.1/contributing.md
Normal file
216
docs/developer/versioned_docs/version-v0.23.1/contributing.md
Normal file
|
|
@ -0,0 +1,216 @@
|
||||||
|
# Contributing Guide
|
||||||
|
|
||||||
|
Welcome! This guide helps you contribute to the Tibber Prices integration.
|
||||||
|
|
||||||
|
## Getting Started
|
||||||
|
|
||||||
|
### Prerequisites
|
||||||
|
|
||||||
|
- Git
|
||||||
|
- VS Code with Remote Containers extension
|
||||||
|
- Docker Desktop
|
||||||
|
|
||||||
|
### Fork and Clone
|
||||||
|
|
||||||
|
1. Fork the repository on GitHub
|
||||||
|
2. Clone your fork:
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/YOUR_USERNAME/hass.tibber_prices.git
|
||||||
|
cd hass.tibber_prices
|
||||||
|
```
|
||||||
|
3. Open in VS Code
|
||||||
|
4. Click "Reopen in Container" when prompted
|
||||||
|
|
||||||
|
The DevContainer will set up everything automatically.
|
||||||
|
|
||||||
|
## Development Workflow
|
||||||
|
|
||||||
|
### 1. Create a Branch
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git checkout -b feature/your-feature-name
|
||||||
|
# or
|
||||||
|
git checkout -b fix/issue-123-description
|
||||||
|
```
|
||||||
|
|
||||||
|
**Branch naming:**
|
||||||
|
- `feature/` - New features
|
||||||
|
- `fix/` - Bug fixes
|
||||||
|
- `docs/` - Documentation only
|
||||||
|
- `refactor/` - Code restructuring
|
||||||
|
- `test/` - Test improvements
|
||||||
|
|
||||||
|
### 2. Make Changes
|
||||||
|
|
||||||
|
Edit code, following [Coding Guidelines](coding-guidelines.md).
|
||||||
|
|
||||||
|
**Run checks frequently:**
|
||||||
|
```bash
|
||||||
|
./scripts/type-check # Pyright type checking
|
||||||
|
./scripts/lint # Ruff linting (auto-fix)
|
||||||
|
./scripts/test # Run tests
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Test Locally
|
||||||
|
|
||||||
|
```bash
|
||||||
|
./scripts/develop # Start HA with integration loaded
|
||||||
|
```
|
||||||
|
|
||||||
|
Access at http://localhost:8123
|
||||||
|
|
||||||
|
### 4. Write Tests
|
||||||
|
|
||||||
|
Add tests in `/tests/` for new features:
|
||||||
|
|
||||||
|
```python
|
||||||
|
@pytest.mark.unit
|
||||||
|
async def test_your_feature(hass, coordinator):
|
||||||
|
"""Test your new feature."""
|
||||||
|
# Arrange
|
||||||
|
coordinator.data = {...}
|
||||||
|
|
||||||
|
# Act
|
||||||
|
result = your_function(coordinator.data)
|
||||||
|
|
||||||
|
# Assert
|
||||||
|
assert result == expected_value
|
||||||
|
```
|
||||||
|
|
||||||
|
Run your test:
|
||||||
|
```bash
|
||||||
|
./scripts/test tests/test_your_feature.py -v
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. Commit Changes
|
||||||
|
|
||||||
|
Follow [Conventional Commits](https://www.conventionalcommits.org/):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git add .
|
||||||
|
git commit -m "feat(sensors): add volatility trend sensor
|
||||||
|
|
||||||
|
Add new sensor showing 3-hour volatility trend direction.
|
||||||
|
Includes attributes with historical volatility data.
|
||||||
|
|
||||||
|
Impact: Users can predict when prices will stabilize or continue fluctuating."
|
||||||
|
```
|
||||||
|
|
||||||
|
**Commit types:**
|
||||||
|
- `feat:` - New feature
|
||||||
|
- `fix:` - Bug fix
|
||||||
|
- `docs:` - Documentation
|
||||||
|
- `refactor:` - Code restructuring
|
||||||
|
- `test:` - Test changes
|
||||||
|
- `chore:` - Maintenance
|
||||||
|
|
||||||
|
**Add scope when relevant:**
|
||||||
|
- `feat(sensors):` - Sensor platform
|
||||||
|
- `fix(coordinator):` - Data coordinator
|
||||||
|
- `docs(user):` - User documentation
|
||||||
|
|
||||||
|
### 6. Push and Create PR
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git push origin your-branch-name
|
||||||
|
```
|
||||||
|
|
||||||
|
Then open Pull Request on GitHub.
|
||||||
|
|
||||||
|
## Pull Request Guidelines
|
||||||
|
|
||||||
|
### PR Template
|
||||||
|
|
||||||
|
Title: Short, descriptive (50 chars max)
|
||||||
|
|
||||||
|
Description should include:
|
||||||
|
```markdown
|
||||||
|
## What
|
||||||
|
Brief description of changes
|
||||||
|
|
||||||
|
## Why
|
||||||
|
Problem being solved or feature rationale
|
||||||
|
|
||||||
|
## How
|
||||||
|
Implementation approach
|
||||||
|
|
||||||
|
## Testing
|
||||||
|
- [ ] Manual testing in Home Assistant
|
||||||
|
- [ ] Unit tests added/updated
|
||||||
|
- [ ] Type checking passes
|
||||||
|
- [ ] Linting passes
|
||||||
|
|
||||||
|
## Breaking Changes
|
||||||
|
(If any - describe migration path)
|
||||||
|
|
||||||
|
## Related Issues
|
||||||
|
Closes #123
|
||||||
|
```
|
||||||
|
|
||||||
|
### PR Checklist
|
||||||
|
|
||||||
|
Before submitting:
|
||||||
|
- [ ] Code follows [Coding Guidelines](coding-guidelines.md)
|
||||||
|
- [ ] All tests pass (`./scripts/test`)
|
||||||
|
- [ ] Type checking passes (`./scripts/type-check`)
|
||||||
|
- [ ] Linting passes (`./scripts/lint-check`)
|
||||||
|
- [ ] Documentation updated (if needed)
|
||||||
|
- [ ] AGENTS.md updated (if patterns changed)
|
||||||
|
- [ ] Commit messages follow Conventional Commits
|
||||||
|
|
||||||
|
### Review Process
|
||||||
|
|
||||||
|
1. **Automated checks** run (CI/CD)
|
||||||
|
2. **Maintainer review** (usually within 3 days)
|
||||||
|
3. **Address feedback** if requested
|
||||||
|
4. **Approval** → Maintainer merges
|
||||||
|
|
||||||
|
## Code Review Tips
|
||||||
|
|
||||||
|
### What Reviewers Look For
|
||||||
|
|
||||||
|
✅ **Good:**
|
||||||
|
- Clear, self-explanatory code
|
||||||
|
- Appropriate comments for complex logic
|
||||||
|
- Tests covering edge cases
|
||||||
|
- Type hints on all functions
|
||||||
|
- Follows existing patterns
|
||||||
|
|
||||||
|
❌ **Avoid:**
|
||||||
|
- Large PRs (>500 lines) - split into smaller ones
|
||||||
|
- Mixing unrelated changes
|
||||||
|
- Missing tests for new features
|
||||||
|
- Breaking changes without migration path
|
||||||
|
- Copy-pasted code (refactor into shared functions)
|
||||||
|
|
||||||
|
### Responding to Feedback
|
||||||
|
|
||||||
|
- Don't take it personally - we're improving code together
|
||||||
|
- Ask questions if feedback unclear
|
||||||
|
- Push additional commits to address comments
|
||||||
|
- Mark conversations as resolved when fixed
|
||||||
|
|
||||||
|
## Finding Issues to Work On
|
||||||
|
|
||||||
|
Good first issues are labeled:
|
||||||
|
- `good first issue` - Beginner-friendly
|
||||||
|
- `help wanted` - Maintainers welcome contributions
|
||||||
|
- `documentation` - Docs improvements
|
||||||
|
|
||||||
|
Comment on the issue before starting work to avoid duplicate effort.
|
||||||
|
|
||||||
|
## Communication
|
||||||
|
|
||||||
|
- **GitHub Issues** - Bug reports, feature requests
|
||||||
|
- **Pull Requests** - Code discussion
|
||||||
|
- **Discussions** - General questions, ideas
|
||||||
|
|
||||||
|
Be respectful, constructive, and patient. We're all volunteers! 🙏
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
💡 **Related:**
|
||||||
|
- [Setup Guide](setup.md) - DevContainer setup
|
||||||
|
- [Coding Guidelines](coding-guidelines.md) - Style guide
|
||||||
|
- [Testing](testing.md) - Writing tests
|
||||||
|
- [Release Management](release-management.md) - How releases work
|
||||||
|
|
@ -0,0 +1,286 @@
|
||||||
|
---
|
||||||
|
comments: false
|
||||||
|
---
|
||||||
|
|
||||||
|
# Critical Behavior Patterns - Testing Guide
|
||||||
|
|
||||||
|
**Purpose:** This documentation lists essential behavior patterns that must be tested to ensure production-quality code and prevent resource leaks.
|
||||||
|
|
||||||
|
**Last Updated:** 2025-11-22
|
||||||
|
**Test Coverage:** 41 tests implemented (100% of critical patterns)
|
||||||
|
|
||||||
|
## 🎯 Why Are These Tests Critical?
|
||||||
|
|
||||||
|
Home Assistant integrations run **continuously** in the background. Resource leaks lead to:
|
||||||
|
- **Memory Leaks**: RAM usage grows over days/weeks until HA becomes unstable
|
||||||
|
- **Callback Leaks**: Listeners remain registered after entity removal → CPU load increases
|
||||||
|
- **Timer Leaks**: Timers continue running after unload → unnecessary background tasks
|
||||||
|
- **File Handle Leaks**: Storage files remain open → system resources exhausted
|
||||||
|
|
||||||
|
## ✅ Test Categories
|
||||||
|
|
||||||
|
### 1. Resource Cleanup (Memory Leak Prevention)
|
||||||
|
|
||||||
|
**File:** `tests/test_resource_cleanup.py`
|
||||||
|
|
||||||
|
#### 1.1 Listener Cleanup ✅
|
||||||
|
|
||||||
|
**What is tested:**
|
||||||
|
- Time-sensitive listeners are correctly removed (`async_add_time_sensitive_listener()`)
|
||||||
|
- Minute-update listeners are correctly removed (`async_add_minute_update_listener()`)
|
||||||
|
- Lifecycle callbacks are correctly unregistered (`register_lifecycle_callback()`)
|
||||||
|
- Sensor cleanup removes ALL registered listeners
|
||||||
|
- Binary sensor cleanup removes ALL registered listeners
|
||||||
|
|
||||||
|
**Why critical:**
|
||||||
|
- Each registered listener holds references to Entity + Coordinator
|
||||||
|
- Without cleanup: Entities are not freed by GC → Memory Leak
|
||||||
|
- With 80+ sensors × 3 listener types = 240+ callbacks that must be cleanly removed
|
||||||
|
|
||||||
|
**Code Locations:**
|
||||||
|
- `coordinator/listeners.py` → `async_add_time_sensitive_listener()`, `async_add_minute_update_listener()`
|
||||||
|
- `coordinator/core.py` → `register_lifecycle_callback()`
|
||||||
|
- `sensor/core.py` → `async_will_remove_from_hass()`
|
||||||
|
- `binary_sensor/core.py` → `async_will_remove_from_hass()`
|
||||||
|
|
||||||
|
#### 1.2 Timer Cleanup ✅
|
||||||
|
|
||||||
|
**What is tested:**
|
||||||
|
- Quarter-hour timer is cancelled and reference cleared
|
||||||
|
- Minute timer is cancelled and reference cleared
|
||||||
|
- Both timers are cancelled together
|
||||||
|
- Cleanup works even when timers are `None`
|
||||||
|
|
||||||
|
**Why critical:**
|
||||||
|
- Uncancelled timers continue running after integration unload
|
||||||
|
- HA's `async_track_utc_time_change()` creates persistent callbacks
|
||||||
|
- Without cleanup: Timers keep firing → CPU load + unnecessary coordinator updates
|
||||||
|
|
||||||
|
**Code Locations:**
|
||||||
|
- `coordinator/listeners.py` → `cancel_timers()`
|
||||||
|
- `coordinator/core.py` → `async_shutdown()`
|
||||||
|
|
||||||
|
#### 1.3 Config Entry Cleanup ✅
|
||||||
|
|
||||||
|
**What is tested:**
|
||||||
|
- Options update listener is registered via `async_on_unload()`
|
||||||
|
- Cleanup function is correctly passed to `async_on_unload()`
|
||||||
|
|
||||||
|
**Why critical:**
|
||||||
|
- `entry.add_update_listener()` registers permanent callback
|
||||||
|
- Without `async_on_unload()`: Listener remains active after reload → duplicate updates
|
||||||
|
- Pattern: `entry.async_on_unload(entry.add_update_listener(handler))`
|
||||||
|
|
||||||
|
**Code Locations:**
|
||||||
|
- `coordinator/core.py` → `__init__()` (listener registration)
|
||||||
|
- `__init__.py` → `async_unload_entry()`
|
||||||
|
|
||||||
|
### 2. Cache Invalidation ✅
|
||||||
|
|
||||||
|
**File:** `tests/test_resource_cleanup.py`
|
||||||
|
|
||||||
|
#### 2.1 Config Cache Invalidation
|
||||||
|
|
||||||
|
**What is tested:**
|
||||||
|
- DataTransformer config cache is invalidated on options change
|
||||||
|
- PeriodCalculator config + period cache is invalidated
|
||||||
|
- Trend calculator cache is cleared on coordinator update
|
||||||
|
|
||||||
|
**Why critical:**
|
||||||
|
- Stale config → Sensors use old user settings
|
||||||
|
- Stale period cache → Incorrect best/peak price periods
|
||||||
|
- Stale trend cache → Outdated trend analysis
|
||||||
|
|
||||||
|
**Code Locations:**
|
||||||
|
- `coordinator/data_transformation.py` → `invalidate_config_cache()`
|
||||||
|
- `coordinator/periods.py` → `invalidate_config_cache()`
|
||||||
|
- `sensor/calculators/trend.py` → `clear_trend_cache()`
|
||||||
|
|
||||||
|
### 3. Storage Cleanup ✅
|
||||||
|
|
||||||
|
**File:** `tests/test_resource_cleanup.py` + `tests/test_coordinator_shutdown.py`
|
||||||
|
|
||||||
|
#### 3.1 Persistent Storage Removal
|
||||||
|
|
||||||
|
**What is tested:**
|
||||||
|
- Storage file is deleted on config entry removal
|
||||||
|
- Cache is saved on shutdown (no data loss)
|
||||||
|
|
||||||
|
**Why critical:**
|
||||||
|
- Without storage removal: Old files remain after uninstallation
|
||||||
|
- Without cache save on shutdown: Data loss on HA restart
|
||||||
|
- Storage path: `.storage/tibber_prices.{entry_id}`
|
||||||
|
|
||||||
|
**Code Locations:**
|
||||||
|
- `__init__.py` → `async_remove_entry()`
|
||||||
|
- `coordinator/core.py` → `async_shutdown()`
|
||||||
|
|
||||||
|
### 4. Timer Scheduling ✅
|
||||||
|
|
||||||
|
**File:** `tests/test_timer_scheduling.py`
|
||||||
|
|
||||||
|
**What is tested:**
|
||||||
|
- Quarter-hour timer is registered with correct parameters
|
||||||
|
- Minute timer is registered with correct parameters
|
||||||
|
- Timers can be re-scheduled (override old timer)
|
||||||
|
- Midnight turnover detection works correctly
|
||||||
|
|
||||||
|
**Why critical:**
|
||||||
|
- Wrong timer parameters → Entities update at wrong times
|
||||||
|
- Without timer override on re-schedule → Multiple parallel timers → Performance problem
|
||||||
|
|
||||||
|
### 5. Sensor-to-Timer Assignment ✅
|
||||||
|
|
||||||
|
**File:** `tests/test_sensor_timer_assignment.py`
|
||||||
|
|
||||||
|
**What is tested:**
|
||||||
|
- All `TIME_SENSITIVE_ENTITY_KEYS` are valid entity keys
|
||||||
|
- All `MINUTE_UPDATE_ENTITY_KEYS` are valid entity keys
|
||||||
|
- Both lists are disjoint (no overlap)
|
||||||
|
- Sensor and binary sensor platforms are checked
|
||||||
|
|
||||||
|
**Why critical:**
|
||||||
|
- Wrong timer assignment → Sensors update at wrong times
|
||||||
|
- Overlap → Duplicate updates → Performance problem
|
||||||
|
|
||||||
|
## 🚨 Additional Analysis (Nice-to-Have Patterns)
|
||||||
|
|
||||||
|
These patterns were analyzed and classified as **not critical**:
|
||||||
|
|
||||||
|
### 6. Async Task Management
|
||||||
|
|
||||||
|
**Current Status:** Fire-and-forget pattern for short tasks
|
||||||
|
- `sensor/core.py` → Chart data refresh (short-lived, max 1-2 seconds)
|
||||||
|
- `coordinator/core.py` → Cache storage (short-lived, max 100ms)
|
||||||
|
|
||||||
|
**Why no tests needed:**
|
||||||
|
- No long-running tasks (all < 2 seconds)
|
||||||
|
- HA's event loop handles short tasks automatically
|
||||||
|
- Task exceptions are already logged
|
||||||
|
|
||||||
|
**If needed:** `_chart_refresh_task` tracking + cancel in `async_will_remove_from_hass()`
|
||||||
|
|
||||||
|
### 7. API Session Cleanup
|
||||||
|
|
||||||
|
**Current Status:** ✅ Correctly implemented
|
||||||
|
- `async_get_clientsession(hass)` is used (shared session)
|
||||||
|
- No new sessions are created
|
||||||
|
- HA manages session lifecycle automatically
|
||||||
|
|
||||||
|
**Code:** `api/client.py` + `__init__.py`
|
||||||
|
|
||||||
|
### 8. Translation Cache Memory
|
||||||
|
|
||||||
|
**Current Status:** ✅ Bounded cache
|
||||||
|
- Max ~5-10 languages × 5KB = 50KB total
|
||||||
|
- Module-level cache without re-loading
|
||||||
|
- Practically no memory issue
|
||||||
|
|
||||||
|
**Code:** `const.py` → `_TRANSLATIONS_CACHE`, `_STANDARD_TRANSLATIONS_CACHE`
|
||||||
|
|
||||||
|
### 9. Coordinator Data Structure Integrity
|
||||||
|
|
||||||
|
**Current Status:** Manually tested via `./scripts/develop`
|
||||||
|
- Midnight turnover works correctly (observed over several days)
|
||||||
|
- Missing keys are handled via `.get()` with defaults
|
||||||
|
- 80+ sensors access `coordinator.data` without errors
|
||||||
|
|
||||||
|
**Structure:**
|
||||||
|
```python
|
||||||
|
coordinator.data = {
|
||||||
|
"user_data": {...},
|
||||||
|
"priceInfo": [...], # Flat list of all enriched intervals
|
||||||
|
"currency": "EUR" # Top-level for easy access
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 10. Service Response Memory
|
||||||
|
|
||||||
|
**Current Status:** HA's response lifecycle
|
||||||
|
- HA automatically frees service responses after return
|
||||||
|
- ApexCharts ~20KB response is one-time per call
|
||||||
|
- No response accumulation in integration code
|
||||||
|
|
||||||
|
**Code:** `services/apexcharts.py`
|
||||||
|
|
||||||
|
## 📊 Test Coverage Status
|
||||||
|
|
||||||
|
### ✅ Implemented Tests (41 total)
|
||||||
|
|
||||||
|
| Category | Status | Tests | File | Coverage |
|
||||||
|
|----------|--------|-------|------|----------|
|
||||||
|
| Listener Cleanup | ✅ | 5 | `test_resource_cleanup.py` | 100% |
|
||||||
|
| Timer Cleanup | ✅ | 4 | `test_resource_cleanup.py` | 100% |
|
||||||
|
| Config Entry Cleanup | ✅ | 1 | `test_resource_cleanup.py` | 100% |
|
||||||
|
| Cache Invalidation | ✅ | 3 | `test_resource_cleanup.py` | 100% |
|
||||||
|
| Storage Cleanup | ✅ | 1 | `test_resource_cleanup.py` | 100% |
|
||||||
|
| Storage Persistence | ✅ | 2 | `test_coordinator_shutdown.py` | 100% |
|
||||||
|
| Timer Scheduling | ✅ | 8 | `test_timer_scheduling.py` | 100% |
|
||||||
|
| Sensor-Timer Assignment | ✅ | 17 | `test_sensor_timer_assignment.py` | 100% |
|
||||||
|
| **TOTAL** | **✅** | **41** | | **100% (critical)** |
|
||||||
|
|
||||||
|
### 📋 Analyzed but Not Implemented (Nice-to-Have)
|
||||||
|
|
||||||
|
| Category | Status | Rationale |
|
||||||
|
|----------|--------|-----------|
|
||||||
|
| Async Task Management | 📋 | Fire-and-forget pattern used (no long-running tasks) |
|
||||||
|
| API Session Cleanup | ✅ | Pattern correct (`async_get_clientsession` used) |
|
||||||
|
| Translation Cache | ✅ | Cache size bounded (~50KB max for 10 languages) |
|
||||||
|
| Data Structure Integrity | 📋 | Would add test time without finding real issues |
|
||||||
|
| Service Response Memory | 📋 | HA automatically frees service responses |
|
||||||
|
|
||||||
|
**Legend:**
|
||||||
|
- ✅ = Fully tested or pattern verified correct
|
||||||
|
- 📋 = Analyzed, low priority for testing (no known issues)
|
||||||
|
|
||||||
|
## 🎯 Development Status
|
||||||
|
|
||||||
|
### ✅ All Critical Patterns Tested
|
||||||
|
|
||||||
|
All essential memory leak prevention patterns are covered by 41 tests:
|
||||||
|
- ✅ Listeners are correctly removed (no callback leaks)
|
||||||
|
- ✅ Timers are cancelled (no background task leaks)
|
||||||
|
- ✅ Config entry cleanup works (no dangling listeners)
|
||||||
|
- ✅ Caches are invalidated (no stale data issues)
|
||||||
|
- ✅ Storage is saved and cleaned up (no data loss)
|
||||||
|
- ✅ Timer scheduling works correctly (no update issues)
|
||||||
|
- ✅ Sensor-timer assignment is correct (no wrong updates)
|
||||||
|
|
||||||
|
### 📋 Nice-to-Have Tests (Optional)
|
||||||
|
|
||||||
|
If problems arise in the future, these tests can be added:
|
||||||
|
|
||||||
|
1. **Async Task Management** - Pattern analyzed (fire-and-forget for short tasks)
|
||||||
|
2. **Data Structure Integrity** - Midnight rotation manually tested
|
||||||
|
3. **Service Response Memory** - HA's response lifecycle automatic
|
||||||
|
|
||||||
|
**Conclusion:** The integration has production-quality test coverage for all critical resource leak patterns.
|
||||||
|
|
||||||
|
## 🔍 How to Run Tests
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run all resource cleanup tests (14 tests)
|
||||||
|
./scripts/test tests/test_resource_cleanup.py -v
|
||||||
|
|
||||||
|
# Run all critical pattern tests (41 tests)
|
||||||
|
./scripts/test tests/test_resource_cleanup.py tests/test_coordinator_shutdown.py \
|
||||||
|
tests/test_timer_scheduling.py tests/test_sensor_timer_assignment.py -v
|
||||||
|
|
||||||
|
# Run all tests with coverage
|
||||||
|
./scripts/test --cov=custom_components.tibber_prices --cov-report=html
|
||||||
|
|
||||||
|
# Type checking and linting
|
||||||
|
./scripts/check
|
||||||
|
|
||||||
|
# Manual memory leak test
|
||||||
|
# 1. Start HA: ./scripts/develop
|
||||||
|
# 2. Monitor RAM: watch -n 1 'ps aux | grep home-assistant'
|
||||||
|
# 3. Reload integration multiple times (HA UI: Settings → Devices → Tibber Prices → Reload)
|
||||||
|
# 4. RAM should stabilize (not grow continuously)
|
||||||
|
```
|
||||||
|
|
||||||
|
## 📚 References
|
||||||
|
|
||||||
|
- **Home Assistant Cleanup Patterns**: https://developers.home-assistant.io/docs/integration_setup_failures/#cleanup
|
||||||
|
- **Async Best Practices**: https://developers.home-assistant.io/docs/asyncio_101/
|
||||||
|
- **Memory Profiling**: https://docs.python.org/3/library/tracemalloc.html
|
||||||
230
docs/developer/versioned_docs/version-v0.23.1/debugging.md
Normal file
230
docs/developer/versioned_docs/version-v0.23.1/debugging.md
Normal file
|
|
@ -0,0 +1,230 @@
|
||||||
|
# Debugging Guide
|
||||||
|
|
||||||
|
Tips and techniques for debugging the Tibber Prices integration during development.
|
||||||
|
|
||||||
|
## Logging
|
||||||
|
|
||||||
|
### Enable Debug Logging
|
||||||
|
|
||||||
|
Add to `configuration.yaml`:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
logger:
|
||||||
|
default: info
|
||||||
|
logs:
|
||||||
|
custom_components.tibber_prices: debug
|
||||||
|
```
|
||||||
|
|
||||||
|
Restart Home Assistant to apply.
|
||||||
|
|
||||||
|
### Key Log Messages
|
||||||
|
|
||||||
|
**Coordinator Updates:**
|
||||||
|
```
|
||||||
|
[custom_components.tibber_prices.coordinator] Successfully fetched price data
|
||||||
|
[custom_components.tibber_prices.coordinator] Cache valid, using cached data
|
||||||
|
[custom_components.tibber_prices.coordinator] Midnight turnover detected, clearing cache
|
||||||
|
```
|
||||||
|
|
||||||
|
**Period Calculation:**
|
||||||
|
```
|
||||||
|
[custom_components.tibber_prices.coordinator.periods] Calculating BEST PRICE periods: flex=15.0%
|
||||||
|
[custom_components.tibber_prices.coordinator.periods] Day 2024-12-06: Found 2 periods
|
||||||
|
[custom_components.tibber_prices.coordinator.periods] Period 1: 02:00-05:00 (12 intervals)
|
||||||
|
```
|
||||||
|
|
||||||
|
**API Errors:**
|
||||||
|
```
|
||||||
|
[custom_components.tibber_prices.api] API request failed: Unauthorized
|
||||||
|
[custom_components.tibber_prices.api] Retrying (attempt 2/3) after 2.0s
|
||||||
|
```
|
||||||
|
|
||||||
|
## VS Code Debugging
|
||||||
|
|
||||||
|
### Launch Configuration
|
||||||
|
|
||||||
|
`.vscode/launch.json`:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"version": "0.2.0",
|
||||||
|
"configurations": [
|
||||||
|
{
|
||||||
|
"name": "Home Assistant",
|
||||||
|
"type": "debugpy",
|
||||||
|
"request": "launch",
|
||||||
|
"module": "homeassistant",
|
||||||
|
"args": ["-c", "config", "--debug"],
|
||||||
|
"justMyCode": false,
|
||||||
|
"env": {
|
||||||
|
"PYTHONPATH": "${workspaceFolder}/.venv/lib/python3.13/site-packages"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Set Breakpoints
|
||||||
|
|
||||||
|
**Coordinator update:**
|
||||||
|
```python
|
||||||
|
# coordinator/core.py
|
||||||
|
async def _async_update_data(self) -> dict:
|
||||||
|
"""Fetch data from API."""
|
||||||
|
breakpoint() # Or set VS Code breakpoint
|
||||||
|
```
|
||||||
|
|
||||||
|
**Period calculation:**
|
||||||
|
```python
|
||||||
|
# coordinator/period_handlers/core.py
|
||||||
|
def calculate_periods(...) -> list[dict]:
|
||||||
|
"""Calculate best/peak price periods."""
|
||||||
|
breakpoint()
|
||||||
|
```
|
||||||
|
|
||||||
|
## pytest Debugging
|
||||||
|
|
||||||
|
### Run Single Test with Output
|
||||||
|
|
||||||
|
```bash
|
||||||
|
.venv/bin/python -m pytest tests/test_period_calculation.py::test_midnight_crossing -v -s
|
||||||
|
```
|
||||||
|
|
||||||
|
**Flags:**
|
||||||
|
- `-v` - Verbose output
|
||||||
|
- `-s` - Show print statements
|
||||||
|
- `-k pattern` - Run tests matching pattern
|
||||||
|
|
||||||
|
### Debug Test in VS Code
|
||||||
|
|
||||||
|
Set breakpoint in test file, use "Debug Test" CodeLens.
|
||||||
|
|
||||||
|
### Useful Test Patterns
|
||||||
|
|
||||||
|
**Print coordinator data:**
|
||||||
|
```python
|
||||||
|
def test_something(coordinator):
|
||||||
|
print(f"Coordinator data: {coordinator.data}")
|
||||||
|
print(f"Price info count: {len(coordinator.data['priceInfo'])}")
|
||||||
|
```
|
||||||
|
|
||||||
|
**Inspect period attributes:**
|
||||||
|
```python
|
||||||
|
def test_periods(hass, coordinator):
|
||||||
|
periods = coordinator.data.get('best_price_periods', [])
|
||||||
|
for period in periods:
|
||||||
|
print(f"Period: {period['start']} to {period['end']}")
|
||||||
|
print(f" Intervals: {len(period['intervals'])}")
|
||||||
|
```
|
||||||
|
|
||||||
|
## Common Issues
|
||||||
|
|
||||||
|
### Integration Not Loading
|
||||||
|
|
||||||
|
**Check:**
|
||||||
|
```bash
|
||||||
|
grep "tibber_prices" config/home-assistant.log
|
||||||
|
```
|
||||||
|
|
||||||
|
**Common causes:**
|
||||||
|
- Syntax error in Python code → Check logs for traceback
|
||||||
|
- Missing dependency → Run `uv sync`
|
||||||
|
- Wrong file permissions → `chmod +x scripts/*`
|
||||||
|
|
||||||
|
### Sensors Not Updating
|
||||||
|
|
||||||
|
**Check coordinator state:**
|
||||||
|
```python
|
||||||
|
# In Developer Tools > Template
|
||||||
|
{{ states.sensor.tibber_home_current_interval_price.last_updated }}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Debug in code:**
|
||||||
|
```python
|
||||||
|
# Add logging in sensor/core.py
|
||||||
|
_LOGGER.debug("Updating sensor %s: old=%s new=%s",
|
||||||
|
self.entity_id, self._attr_native_value, new_value)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Period Calculation Wrong
|
||||||
|
|
||||||
|
**Enable detailed period logs:**
|
||||||
|
```python
|
||||||
|
# coordinator/period_handlers/period_building.py
|
||||||
|
_LOGGER.debug("Candidate intervals: %s",
|
||||||
|
[(i['startsAt'], i['total']) for i in candidates])
|
||||||
|
```
|
||||||
|
|
||||||
|
**Check filter statistics:**
|
||||||
|
```
|
||||||
|
[period_building] Flex filter blocked: 45 intervals
|
||||||
|
[period_building] Min distance blocked: 12 intervals
|
||||||
|
[period_building] Level filter blocked: 8 intervals
|
||||||
|
```
|
||||||
|
|
||||||
|
## Performance Profiling
|
||||||
|
|
||||||
|
### Time Execution
|
||||||
|
|
||||||
|
```python
|
||||||
|
import time
|
||||||
|
|
||||||
|
start = time.perf_counter()
|
||||||
|
result = expensive_function()
|
||||||
|
duration = time.perf_counter() - start
|
||||||
|
_LOGGER.debug("Function took %.3fs", duration)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Memory Usage
|
||||||
|
|
||||||
|
```python
|
||||||
|
import tracemalloc
|
||||||
|
|
||||||
|
tracemalloc.start()
|
||||||
|
# ... your code ...
|
||||||
|
current, peak = tracemalloc.get_traced_memory()
|
||||||
|
_LOGGER.debug("Memory: current=%d peak=%d", current, peak)
|
||||||
|
tracemalloc.stop()
|
||||||
|
```
|
||||||
|
|
||||||
|
### Profile with cProfile
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python -m cProfile -o profile.stats -m homeassistant -c config
|
||||||
|
python -m pstats profile.stats
|
||||||
|
# Then: sort cumtime, stats 20
|
||||||
|
```
|
||||||
|
|
||||||
|
## Live Debugging in Running HA
|
||||||
|
|
||||||
|
### Remote Debugging with debugpy
|
||||||
|
|
||||||
|
Add to coordinator code:
|
||||||
|
```python
|
||||||
|
import debugpy
|
||||||
|
debugpy.listen(5678)
|
||||||
|
_LOGGER.info("Waiting for debugger attach on port 5678")
|
||||||
|
debugpy.wait_for_client()
|
||||||
|
```
|
||||||
|
|
||||||
|
Connect from VS Code with remote attach configuration.
|
||||||
|
|
||||||
|
### IPython REPL
|
||||||
|
|
||||||
|
Install in container:
|
||||||
|
```bash
|
||||||
|
uv pip install ipython
|
||||||
|
```
|
||||||
|
|
||||||
|
Add breakpoint:
|
||||||
|
```python
|
||||||
|
from IPython import embed
|
||||||
|
embed() # Drops into interactive shell
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
💡 **Related:**
|
||||||
|
- [Testing Guide](testing.md) - Writing and running tests
|
||||||
|
- [Setup Guide](setup.md) - Development environment
|
||||||
|
- [Architecture](architecture.md) - Code structure
|
||||||
185
docs/developer/versioned_docs/version-v0.23.1/intro.md
Normal file
185
docs/developer/versioned_docs/version-v0.23.1/intro.md
Normal file
|
|
@ -0,0 +1,185 @@
|
||||||
|
# Developer Documentation
|
||||||
|
|
||||||
|
This section contains documentation for contributors and maintainers of the **Tibber Prices custom integration**.
|
||||||
|
|
||||||
|
:::info Community Project
|
||||||
|
This is an independent, community-maintained custom integration for Home Assistant. It is **not** an official Tibber product and is **not** affiliated with Tibber AS.
|
||||||
|
:::
|
||||||
|
|
||||||
|
## 📚 Developer Guides
|
||||||
|
|
||||||
|
- **[Setup](setup.md)** - DevContainer, environment setup, and dependencies
|
||||||
|
- **[Architecture](architecture.md)** - Code structure, patterns, and conventions
|
||||||
|
- **[Period Calculation Theory](period-calculation-theory.md)** - Mathematical foundations, Flex/Distance interaction, Relaxation strategy
|
||||||
|
- **[Timer Architecture](timer-architecture.md)** - Timer system, scheduling, coordination (3 independent timers)
|
||||||
|
- **[Caching Strategy](caching-strategy.md)** - Cache layers, invalidation, debugging
|
||||||
|
- **[Testing](testing.md)** - How to run tests and write new test cases
|
||||||
|
- **[Release Management](release-management.md)** - Release workflow and versioning process
|
||||||
|
- **[Coding Guidelines](coding-guidelines.md)** - Style guide, linting, and best practices
|
||||||
|
- **[Refactoring Guide](refactoring-guide.md)** - How to plan and execute major refactorings
|
||||||
|
|
||||||
|
## 🤖 AI Documentation
|
||||||
|
|
||||||
|
The main AI/Copilot documentation is in [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.23.1/AGENTS.md). This file serves as long-term memory for AI assistants and contains:
|
||||||
|
|
||||||
|
- Detailed architectural patterns
|
||||||
|
- Code quality rules and conventions
|
||||||
|
- Development workflow guidance
|
||||||
|
- Common pitfalls and anti-patterns
|
||||||
|
- Project-specific patterns and utilities
|
||||||
|
|
||||||
|
**Important:** When proposing changes to patterns or conventions, always update [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.23.1/AGENTS.md) to keep AI guidance consistent.
|
||||||
|
|
||||||
|
### AI-Assisted Development
|
||||||
|
|
||||||
|
This integration is developed with extensive AI assistance (GitHub Copilot, Claude, and other AI tools). The AI handles:
|
||||||
|
|
||||||
|
- **Pattern Recognition**: Understanding and applying Home Assistant best practices
|
||||||
|
- **Code Generation**: Implementing features with proper type hints, error handling, and documentation
|
||||||
|
- **Refactoring**: Maintaining consistency across the codebase during structural changes
|
||||||
|
- **Translation Management**: Keeping 5 language files synchronized
|
||||||
|
- **Documentation**: Generating and maintaining comprehensive documentation
|
||||||
|
|
||||||
|
**Quality Assurance:**
|
||||||
|
|
||||||
|
- Automated linting with Ruff (120-char line length, max complexity 25)
|
||||||
|
- Home Assistant's type checking and validation
|
||||||
|
- Real-world testing in development environment
|
||||||
|
- Code review by maintainer before merging
|
||||||
|
|
||||||
|
**Benefits:**
|
||||||
|
|
||||||
|
- Rapid feature development while maintaining quality
|
||||||
|
- Consistent code patterns across all modules
|
||||||
|
- Comprehensive documentation maintained alongside code
|
||||||
|
- Quick bug fixes with proper understanding of context
|
||||||
|
|
||||||
|
**Limitations:**
|
||||||
|
|
||||||
|
- AI may occasionally miss edge cases or subtle bugs
|
||||||
|
- Some complex Home Assistant patterns may need human review
|
||||||
|
- Translation quality depends on AI's understanding of target language
|
||||||
|
- User feedback is crucial for discovering real-world issues
|
||||||
|
|
||||||
|
If you're working with AI tools on this project, the [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.23.1/AGENTS.md) file provides the context and patterns that ensure consistency.
|
||||||
|
|
||||||
|
## 🚀 Quick Start for Contributors
|
||||||
|
|
||||||
|
1. **Fork and clone** the repository
|
||||||
|
2. **Open in DevContainer** (VS Code: "Reopen in Container")
|
||||||
|
3. **Run setup**: `./scripts/setup/setup` (happens automatically via `postCreateCommand`)
|
||||||
|
4. **Start development environment**: `./scripts/develop`
|
||||||
|
5. **Make your changes** following the [Coding Guidelines](coding-guidelines.md)
|
||||||
|
6. **Run linting**: `./scripts/lint`
|
||||||
|
7. **Validate integration**: `./scripts/release/hassfest`
|
||||||
|
8. **Test your changes** in the running Home Assistant instance
|
||||||
|
9. **Commit using Conventional Commits** format
|
||||||
|
10. **Open a Pull Request** with clear description
|
||||||
|
|
||||||
|
## 🛠️ Development Tools
|
||||||
|
|
||||||
|
The project includes several helper scripts in `./scripts/`:
|
||||||
|
|
||||||
|
- `bootstrap` - Initial setup of dependencies
|
||||||
|
- `develop` - Start Home Assistant in debug mode (auto-cleans .egg-info)
|
||||||
|
- `clean` - Remove build artifacts and caches
|
||||||
|
- `lint` - Auto-fix code issues with ruff
|
||||||
|
- `lint-check` - Check code without modifications (CI mode)
|
||||||
|
- `hassfest` - Validate integration structure (JSON, Python syntax, required files)
|
||||||
|
- `setup` - Install development tools (git-cliff, @github/copilot)
|
||||||
|
- `prepare-release` - Prepare a new release (bump version, create tag)
|
||||||
|
- `generate-release-notes` - Generate release notes from commits
|
||||||
|
|
||||||
|
## 📦 Project Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
custom_components/tibber_prices/
|
||||||
|
├── __init__.py # Integration setup
|
||||||
|
├── coordinator.py # Data update coordinator with caching
|
||||||
|
├── api.py # Tibber GraphQL API client
|
||||||
|
├── price_utils.py # Price enrichment functions
|
||||||
|
├── average_utils.py # Average calculation utilities
|
||||||
|
├── sensor/ # Sensor platform (package)
|
||||||
|
│ ├── __init__.py # Platform setup
|
||||||
|
│ ├── core.py # TibberPricesSensor class
|
||||||
|
│ ├── definitions.py # Entity descriptions
|
||||||
|
│ ├── helpers.py # Pure helper functions
|
||||||
|
│ └── attributes.py # Attribute builders
|
||||||
|
├── binary_sensor.py # Binary sensor platform
|
||||||
|
├── entity_utils/ # Shared entity helpers
|
||||||
|
│ ├── icons.py # Icon mapping logic
|
||||||
|
│ ├── colors.py # Color mapping logic
|
||||||
|
│ └── attributes.py # Common attribute builders
|
||||||
|
├── services.py # Custom services
|
||||||
|
├── config_flow.py # UI configuration flow
|
||||||
|
├── const.py # Constants and helpers
|
||||||
|
├── translations/ # Standard HA translations
|
||||||
|
└── custom_translations/ # Extended translations (descriptions)
|
||||||
|
```
|
||||||
|
|
||||||
|
## 🔍 Key Concepts
|
||||||
|
|
||||||
|
**DataUpdateCoordinator Pattern:**
|
||||||
|
|
||||||
|
- Centralized data fetching and caching
|
||||||
|
- Automatic entity updates on data changes
|
||||||
|
- Persistent storage via `Store`
|
||||||
|
- Quarter-hour boundary refresh scheduling
|
||||||
|
|
||||||
|
**Price Data Enrichment:**
|
||||||
|
|
||||||
|
- Raw API data is enriched with statistical analysis
|
||||||
|
- Trailing/leading 24h averages calculated per interval
|
||||||
|
- Price differences and ratings added
|
||||||
|
- All via pure functions in `price_utils.py`
|
||||||
|
|
||||||
|
**Translation System:**
|
||||||
|
|
||||||
|
- Dual system: `/translations/` (HA schema) + `/custom_translations/` (extended)
|
||||||
|
- Both must stay in sync across all languages (de, en, nb, nl, sv)
|
||||||
|
- Async loading at integration setup
|
||||||
|
|
||||||
|
## 🧪 Testing
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Validate integration structure
|
||||||
|
./scripts/release/hassfest
|
||||||
|
|
||||||
|
# Run all tests
|
||||||
|
pytest tests/
|
||||||
|
|
||||||
|
# Run specific test file
|
||||||
|
pytest tests/test_coordinator.py
|
||||||
|
|
||||||
|
# Run with coverage
|
||||||
|
pytest --cov=custom_components.tibber_prices tests/
|
||||||
|
```
|
||||||
|
|
||||||
|
## 📝 Documentation Standards
|
||||||
|
|
||||||
|
Documentation is organized in two Docusaurus sites:
|
||||||
|
|
||||||
|
- **User docs** (`docs/user/`): Installation, configuration, usage guides
|
||||||
|
- Markdown files in `docs/user/docs/*.md`
|
||||||
|
- Navigation managed via `docs/user/sidebars.ts`
|
||||||
|
- **Developer docs** (`docs/developer/`): Architecture, patterns, contribution guides
|
||||||
|
- Markdown files in `docs/developer/docs/*.md`
|
||||||
|
- Navigation managed via `docs/developer/sidebars.ts`
|
||||||
|
- **AI guidance**: `AGENTS.md` (patterns, conventions, long-term memory)
|
||||||
|
|
||||||
|
**Best practices:**
|
||||||
|
- Use clear examples and code snippets
|
||||||
|
- Keep docs up-to-date with code changes
|
||||||
|
- Add new pages to appropriate `sidebars.ts` for navigation
|
||||||
|
|
||||||
|
## 🤝 Contributing
|
||||||
|
|
||||||
|
See [CONTRIBUTING.md](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.23.1/CONTRIBUTING.md) for detailed contribution guidelines, code of conduct, and pull request process.
|
||||||
|
|
||||||
|
## 📄 License
|
||||||
|
|
||||||
|
This project is licensed under the [MIT License](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.23.1/LICENSE).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Note:** This documentation is for developers. End users should refer to the [User Documentation](https://jpawlowski.github.io/hass.tibber_prices/user/).
|
||||||
322
docs/developer/versioned_docs/version-v0.23.1/performance.md
Normal file
322
docs/developer/versioned_docs/version-v0.23.1/performance.md
Normal file
|
|
@ -0,0 +1,322 @@
|
||||||
|
# Performance Optimization
|
||||||
|
|
||||||
|
Guidelines for maintaining and improving integration performance.
|
||||||
|
|
||||||
|
## Performance Goals
|
||||||
|
|
||||||
|
Target metrics:
|
||||||
|
- **Coordinator update**: <500ms (typical: 200-300ms)
|
||||||
|
- **Sensor update**: <10ms per sensor
|
||||||
|
- **Period calculation**: <100ms (typical: 20-50ms)
|
||||||
|
- **Memory footprint**: <10MB per home
|
||||||
|
- **API calls**: <100 per day per home
|
||||||
|
|
||||||
|
## Profiling
|
||||||
|
|
||||||
|
### Timing Decorator
|
||||||
|
|
||||||
|
Use for performance-critical *synchronous* functions (for coroutines, measure around the `await` instead — a sync wrapper would only time coroutine creation):
|
||||||
|
|
||||||
|
```python
|
||||||
|
import time
|
||||||
|
import functools
|
||||||
|
|
||||||
|
def timing(func):
|
||||||
|
@functools.wraps(func)
|
||||||
|
def wrapper(*args, **kwargs):
|
||||||
|
start = time.perf_counter()
|
||||||
|
result = func(*args, **kwargs)
|
||||||
|
duration = time.perf_counter() - start
|
||||||
|
_LOGGER.debug("%s took %.3fms", func.__name__, duration * 1000)
|
||||||
|
return result
|
||||||
|
return wrapper
|
||||||
|
|
||||||
|
@timing
|
||||||
|
def expensive_calculation():
|
||||||
|
# Your code here
|
||||||
|
```
|
||||||
|
|
||||||
|
### Memory Profiling
|
||||||
|
|
||||||
|
```python
|
||||||
|
import tracemalloc
|
||||||
|
|
||||||
|
tracemalloc.start()
|
||||||
|
# Run your code
|
||||||
|
current, peak = tracemalloc.get_traced_memory()
|
||||||
|
_LOGGER.info("Memory: current=%.2fMB peak=%.2fMB",
|
||||||
|
current / 1024**2, peak / 1024**2)
|
||||||
|
tracemalloc.stop()
|
||||||
|
```
|
||||||
|
|
||||||
|
### Async Profiling
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install aioprof
|
||||||
|
uv pip install aioprof
|
||||||
|
|
||||||
|
# Run with profiling
|
||||||
|
python -m aioprof homeassistant -c config
|
||||||
|
```
|
||||||
|
|
||||||
|
## Optimization Patterns
|
||||||
|
|
||||||
|
### Caching
|
||||||
|
|
||||||
|
**1. Persistent Cache** (API data):
|
||||||
|
```python
|
||||||
|
# Already implemented in coordinator/cache.py
|
||||||
|
store = Store(hass, STORAGE_VERSION, STORAGE_KEY)
|
||||||
|
data = await store.async_load()
|
||||||
|
```
|
||||||
|
|
||||||
|
**2. Translation Cache** (in-memory):
|
||||||
|
```python
|
||||||
|
# Already implemented in const.py
|
||||||
|
_TRANSLATION_CACHE: dict[str, dict] = {}
|
||||||
|
|
||||||
|
def get_translation(path: str, language: str) -> dict:
|
||||||
|
cache_key = f"{path}_{language}"
|
||||||
|
if cache_key not in _TRANSLATION_CACHE:
|
||||||
|
_TRANSLATION_CACHE[cache_key] = load_translation(path, language)
|
||||||
|
return _TRANSLATION_CACHE[cache_key]
|
||||||
|
```
|
||||||
|
|
||||||
|
**3. Config Cache** (invalidated on options change):
|
||||||
|
```python
|
||||||
|
class DataTransformer:
|
||||||
|
def __init__(self):
|
||||||
|
self._config_cache: dict | None = None
|
||||||
|
|
||||||
|
def get_config(self) -> dict:
|
||||||
|
if self._config_cache is None:
|
||||||
|
self._config_cache = self._build_config()
|
||||||
|
return self._config_cache
|
||||||
|
|
||||||
|
def invalidate_config_cache(self):
|
||||||
|
self._config_cache = None
|
||||||
|
```
|
||||||
|
|
||||||
|
### Lazy Loading
|
||||||
|
|
||||||
|
**Load data only when needed:**
|
||||||
|
```python
|
||||||
|
@property
|
||||||
|
def extra_state_attributes(self) -> dict | None:
|
||||||
|
"""Return attributes."""
|
||||||
|
# Calculate only when accessed
|
||||||
|
if self.entity_description.key == "complex_sensor":
|
||||||
|
return self._calculate_complex_attributes()
|
||||||
|
return None
|
||||||
|
```
|
||||||
|
|
||||||
|
### Bulk Operations
|
||||||
|
|
||||||
|
**Process multiple items at once:**
|
||||||
|
```python
|
||||||
|
# ❌ Slow - loop with individual operations
|
||||||
|
for interval in intervals:
|
||||||
|
enriched = enrich_single_interval(interval)
|
||||||
|
results.append(enriched)
|
||||||
|
|
||||||
|
# ✅ Fast - bulk processing
|
||||||
|
results = enrich_intervals_bulk(intervals)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Async Best Practices
|
||||||
|
|
||||||
|
**1. Concurrent API calls:**
|
||||||
|
```python
|
||||||
|
# ❌ Sequential (slow)
|
||||||
|
user_data = await fetch_user_data()
|
||||||
|
price_data = await fetch_price_data()
|
||||||
|
|
||||||
|
# ✅ Concurrent (fast)
|
||||||
|
user_data, price_data = await asyncio.gather(
|
||||||
|
fetch_user_data(),
|
||||||
|
fetch_price_data()
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
**2. Don't block event loop:**
|
||||||
|
```python
|
||||||
|
# ❌ Blocking
|
||||||
|
result = heavy_computation() # Blocks for seconds
|
||||||
|
|
||||||
|
# ✅ Non-blocking
|
||||||
|
result = await hass.async_add_executor_job(heavy_computation)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Memory Management
|
||||||
|
|
||||||
|
### Avoid Memory Leaks
|
||||||
|
|
||||||
|
**1. Clear references:**
|
||||||
|
```python
|
||||||
|
class Coordinator:
|
||||||
|
async def async_shutdown(self):
|
||||||
|
"""Clean up resources."""
|
||||||
|
self._listeners.clear()
|
||||||
|
self._data = None
|
||||||
|
self._cache = None
|
||||||
|
```
|
||||||
|
|
||||||
|
**2. Use weak references for callbacks:**
|
||||||
|
```python
|
||||||
|
import weakref
|
||||||
|
|
||||||
|
class Manager:
|
||||||
|
def __init__(self):
|
||||||
|
self._callbacks: list[weakref.ref] = []
|
||||||
|
|
||||||
|
def register(self, callback):
|
||||||
|
self._callbacks.append(weakref.ref(callback))
|
||||||
|
```
|
||||||
|
|
||||||
|
### Efficient Data Structures
|
||||||
|
|
||||||
|
**Use appropriate types:**
|
||||||
|
```python
|
||||||
|
# ❌ List for lookups (O(n))
|
||||||
|
if timestamp in timestamp_list:
|
||||||
|
...
|
||||||
|
|
||||||
|
# ✅ Set for lookups (O(1))
|
||||||
|
if timestamp in timestamp_set:
|
||||||
|
...
|
||||||
|
|
||||||
|
# ❌ List comprehension with filter
|
||||||
|
results = [x for x in items if condition(x)]
|
||||||
|
|
||||||
|
# ✅ Generator for large datasets
|
||||||
|
results = (x for x in items if condition(x))
|
||||||
|
```
|
||||||
|
|
||||||
|
## Coordinator Optimization
|
||||||
|
|
||||||
|
### Minimize API Calls
|
||||||
|
|
||||||
|
**Already implemented:**
|
||||||
|
- Cache valid until midnight
|
||||||
|
- User data cached for 24h
|
||||||
|
- Only poll when tomorrow data expected
|
||||||
|
|
||||||
|
**Monitor API usage:**
|
||||||
|
```python
|
||||||
|
_LOGGER.debug("API call: %s (cache_age=%s)",
|
||||||
|
endpoint, cache_age)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Smart Updates
|
||||||
|
|
||||||
|
**Only update when needed:**
|
||||||
|
```python
|
||||||
|
async def _async_update_data(self) -> dict:
|
||||||
|
"""Fetch data from API."""
|
||||||
|
if self._is_cache_valid():
|
||||||
|
_LOGGER.debug("Using cached data")
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
# Fetch new data
|
||||||
|
return await self._fetch_data()
|
||||||
|
```
|
||||||
|
|
||||||
|
## Database Impact
|
||||||
|
|
||||||
|
### State Class Selection
|
||||||
|
|
||||||
|
**Affects long-term statistics storage:**
|
||||||
|
```python
|
||||||
|
# ❌ MEASUREMENT for prices (stores every change)
|
||||||
|
state_class=SensorStateClass.MEASUREMENT # ~35K records/year
|
||||||
|
|
||||||
|
# ✅ None for prices (no long-term stats)
|
||||||
|
state_class=None # Only current state
|
||||||
|
|
||||||
|
# ✅ TOTAL for counters only
|
||||||
|
state_class=SensorStateClass.TOTAL # For cumulative values
|
||||||
|
```
|
||||||
|
|
||||||
|
### Attribute Size
|
||||||
|
|
||||||
|
**Keep attributes minimal:**
|
||||||
|
```python
|
||||||
|
# ❌ Large nested structures (KB per update)
|
||||||
|
attributes = {
|
||||||
|
"all_intervals": [...], # 384 intervals
|
||||||
|
"full_history": [...], # Days of data
|
||||||
|
}
|
||||||
|
|
||||||
|
# ✅ Essential data only (bytes per update)
|
||||||
|
attributes = {
|
||||||
|
"timestamp": "...",
|
||||||
|
"rating_level": "...",
|
||||||
|
"next_interval": "...",
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Testing Performance
|
||||||
|
|
||||||
|
### Benchmark Tests
|
||||||
|
|
||||||
|
```python
|
||||||
|
import pytest
|
||||||
|
import time
|
||||||
|
|
||||||
|
@pytest.mark.benchmark
|
||||||
|
def test_period_calculation_performance(coordinator):
|
||||||
|
"""Period calculation should complete in <100ms."""
|
||||||
|
start = time.perf_counter()
|
||||||
|
|
||||||
|
periods = calculate_periods(coordinator.data)
|
||||||
|
|
||||||
|
duration = time.perf_counter() - start
|
||||||
|
assert duration < 0.1, f"Too slow: {duration:.3f}s"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Load Testing
|
||||||
|
|
||||||
|
```python
|
||||||
|
@pytest.mark.integration
|
||||||
|
async def test_multiple_homes_performance(hass):
|
||||||
|
"""Test with 10 homes."""
|
||||||
|
coordinators = []
|
||||||
|
for i in range(10):
|
||||||
|
coordinator = create_coordinator(hass, home_id=f"home_{i}")
|
||||||
|
await coordinator.async_refresh()
|
||||||
|
coordinators.append(coordinator)
|
||||||
|
|
||||||
|
# Verify memory usage
|
||||||
|
# Verify update times
|
||||||
|
```
|
||||||
|
|
||||||
|
## Monitoring in Production
|
||||||
|
|
||||||
|
### Log Performance Metrics
|
||||||
|
|
||||||
|
```python
|
||||||
|
@timing
|
||||||
|
async def _async_update_data(self) -> dict:
|
||||||
|
"""Fetch data with timing."""
|
||||||
|
result = await self._fetch_data()
|
||||||
|
_LOGGER.info("Update completed in %.2fs", timing_duration)
|
||||||
|
return result
|
||||||
|
```
|
||||||
|
|
||||||
|
### Memory Tracking
|
||||||
|
|
||||||
|
```python
|
||||||
|
import psutil
|
||||||
|
import os
|
||||||
|
|
||||||
|
process = psutil.Process(os.getpid())
|
||||||
|
memory_mb = process.memory_info().rss / 1024**2
|
||||||
|
_LOGGER.debug("Current memory usage: %.2f MB", memory_mb)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
💡 **Related:**
|
||||||
|
- [Caching Strategy](caching-strategy.md) - Cache layers
|
||||||
|
- [Architecture](architecture.md) - System design
|
||||||
|
- [Debugging](debugging.md) - Profiling tools
|
||||||
File diff suppressed because it is too large
Load diff
|
|
@ -0,0 +1,290 @@
|
||||||
|
# Recorder History Optimization
|
||||||
|
|
||||||
|
**Status**: ✅ IMPLEMENTED
|
||||||
|
**Last Updated**: 2025-12-07
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
This document describes the implementation of `_unrecorded_attributes` for Tibber Prices entities to prevent Home Assistant Recorder database bloat by excluding non-essential attributes from historical data storage.
|
||||||
|
|
||||||
|
**Reference**: [HA Developer Docs - Excluding State Attributes](https://developers.home-assistant.io/docs/core/entity/#excluding-state-attributes-from-recorder-history)
|
||||||
|
|
||||||
|
## Implementation
|
||||||
|
|
||||||
|
Both `TibberPricesSensor` and `TibberPricesBinarySensor` implement `_unrecorded_attributes` as a class-level `frozenset` to exclude attributes that don't provide value in historical data analysis.
|
||||||
|
|
||||||
|
### Pattern
|
||||||
|
|
||||||
|
```python
|
||||||
|
class TibberPricesSensor(TibberPricesEntity, SensorEntity):
|
||||||
|
"""tibber_prices Sensor class."""
|
||||||
|
|
||||||
|
_unrecorded_attributes = frozenset(
|
||||||
|
{
|
||||||
|
"description",
|
||||||
|
"usage_tips",
|
||||||
|
# ... more attributes
|
||||||
|
}
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Key Points:**
|
||||||
|
- Must be a **class attribute** (not instance attribute)
|
||||||
|
- Use `frozenset` for immutability and performance
|
||||||
|
- Applied automatically by Home Assistant's Recorder component
|
||||||
|
|
||||||
|
## Categories of Excluded Attributes
|
||||||
|
|
||||||
|
### 1. Descriptions/Help Text
|
||||||
|
|
||||||
|
**Attributes:** `description`, `usage_tips`
|
||||||
|
|
||||||
|
**Reason:** Static, large text strings (100-500 chars each) that:
|
||||||
|
- Never change or change very rarely
|
||||||
|
- Don't provide analytical value in history
|
||||||
|
- Consume significant database space when recorded on every state change
|
||||||
|
- Can be retrieved from translation files when needed
|
||||||
|
|
||||||
|
**Impact:** ~500-1000 bytes saved per state change
|
||||||
|
|
||||||
|
### 2. Large Nested Structures
|
||||||
|
|
||||||
|
**Attributes:**
|
||||||
|
- `periods` (binary_sensor) - Array of all period summaries
|
||||||
|
- `data` (chart_data_export) - Complete price data arrays
|
||||||
|
- `trend_attributes` - Detailed trend analysis
|
||||||
|
- `current_trend_attributes` - Current trend details
|
||||||
|
- `trend_change_attributes` - Trend change analysis
|
||||||
|
- `volatility_attributes` - Detailed volatility breakdown
|
||||||
|
|
||||||
|
**Reason:** Complex nested data structures that are:
|
||||||
|
- Serialized to JSON for storage (expensive)
|
||||||
|
- Create large database rows (2-20 KB each)
|
||||||
|
- Slow down history queries
|
||||||
|
- Provide limited value in historical analysis (current state usually sufficient)
|
||||||
|
|
||||||
|
**Impact:** ~10-30 KB saved per state change for affected sensors
|
||||||
|
|
||||||
|
**Example - periods array:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"periods": [
|
||||||
|
{
|
||||||
|
"start": "2025-12-07T06:00:00+01:00",
|
||||||
|
"end": "2025-12-07T08:00:00+01:00",
|
||||||
|
"duration_minutes": 120,
|
||||||
|
"price_mean": 18.5,
|
||||||
|
"price_median": 18.3,
|
||||||
|
"price_min": 17.2,
|
||||||
|
"price_max": 19.8,
|
||||||
|
// ... 10+ more attributes × 10-20 periods
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Frequently Changing Diagnostics
|
||||||
|
|
||||||
|
**Attributes:** `icon_color`, `cache_age`, `cache_validity`, `data_completeness`, `data_status`
|
||||||
|
|
||||||
|
**Reason:**
|
||||||
|
- Change every update cycle (every 15 minutes or more frequently)
|
||||||
|
- Don't provide long-term analytical value
|
||||||
|
- Create state changes even when core values haven't changed
|
||||||
|
- Clutter history with cosmetic changes
|
||||||
|
- Can be reconstructed from other attributes if needed
|
||||||
|
|
||||||
|
**Impact:** Prevents unnecessary state writes when only cosmetic attributes change
|
||||||
|
|
||||||
|
**Example:** `icon_color` changes from `#00ff00` to `#ffff00` but price hasn't changed → No state write needed
|
||||||
|
|
||||||
|
### 4. Static/Rarely Changing Configuration
|
||||||
|
|
||||||
|
**Attributes:** `tomorrow_expected_after`, `level_value`, `rating_value`, `level_id`, `rating_id`, `currency`, `resolution`, `yaxis_min`, `yaxis_max`
|
||||||
|
|
||||||
|
**Reason:**
|
||||||
|
- Configuration values that rarely change
|
||||||
|
- Waste space when recorded repeatedly
|
||||||
|
- Can be derived from other attributes or from entity state
|
||||||
|
|
||||||
|
**Impact:** ~100-200 bytes saved per state change
|
||||||
|
|
||||||
|
### 5. Temporary/Time-Bound Data
|
||||||
|
|
||||||
|
**Attributes:** `next_api_poll`, `next_midnight_turnover`, `last_api_fetch`, `last_cache_update`, `last_turnover`, `last_error`, `error`
|
||||||
|
|
||||||
|
**Reason:**
|
||||||
|
- Only relevant at moment of reading
|
||||||
|
- Won't be valid after some time
|
||||||
|
- Similar to `entity_picture` in HA core image entities
|
||||||
|
- Superseded by next update
|
||||||
|
|
||||||
|
**Impact:** ~200-400 bytes saved per state change
|
||||||
|
|
||||||
|
**Example:** `next_api_poll: "2025-12-07T14:30:00"` stored at 14:15 is useless when viewing history at 15:00
|
||||||
|
|
||||||
|
### 6. Relaxation Details
|
||||||
|
|
||||||
|
**Attributes:** `relaxation_level`, `relaxation_threshold_original_%`, `relaxation_threshold_applied_%`
|
||||||
|
|
||||||
|
**Reason:**
|
||||||
|
- Detailed technical information not needed for historical analysis
|
||||||
|
- Only useful for debugging during active development
|
||||||
|
- Boolean `relaxation_active` is kept for high-level analysis
|
||||||
|
|
||||||
|
**Impact:** ~50-100 bytes saved per state change
|
||||||
|
|
||||||
|
### 7. Redundant/Derived Data
|
||||||
|
|
||||||
|
**Attributes:** `price_spread`, `volatility`, `diff_%`, `rating_difference_%`, `period_price_diff_from_daily_min`, `period_price_diff_from_daily_min_%`, `periods_total`, `periods_remaining`
|
||||||
|
|
||||||
|
**Reason:**
|
||||||
|
- Can be calculated from other attributes
|
||||||
|
- Redundant information
|
||||||
|
- Doesn't add analytical value to history
|
||||||
|
|
||||||
|
**Impact:** ~100-200 bytes saved per state change
|
||||||
|
|
||||||
|
**Example:** `price_spread = price_max - price_min` (both are recorded, so spread can be calculated)
|
||||||
|
|
||||||
|
## Attributes That ARE Recorded
|
||||||
|
|
||||||
|
These attributes **remain in history** because they provide essential analytical value:
|
||||||
|
|
||||||
|
### Time-Series Core
|
||||||
|
- `timestamp` - Critical for time-series analysis (ALWAYS FIRST)
|
||||||
|
- All price values - Core sensor states
|
||||||
|
|
||||||
|
### Diagnostics & Tracking
|
||||||
|
- `cache_age_minutes` - Numeric value for diagnostics tracking over time
|
||||||
|
- `updates_today` - Tracking API usage patterns
|
||||||
|
|
||||||
|
### Data Completeness
|
||||||
|
- `interval_count`, `intervals_available` - Data completeness metrics
|
||||||
|
- `yesterday_available`, `today_available`, `tomorrow_available` - Boolean status
|
||||||
|
|
||||||
|
### Period Data
|
||||||
|
- `start`, `end`, `duration_minutes` - Core period timing
|
||||||
|
- `price_mean`, `price_median`, `price_min`, `price_max` - Core price statistics
|
||||||
|
|
||||||
|
### High-Level Status
|
||||||
|
- `relaxation_active` - Whether relaxation was used (boolean, useful for analyzing when periods needed relaxation)
|
||||||
|
|
||||||
|
## Expected Database Impact
|
||||||
|
|
||||||
|
### Space Savings
|
||||||
|
|
||||||
|
**Per state change:**
|
||||||
|
- Before: ~3-8 KB average
|
||||||
|
- After: ~0.5-1.5 KB average
|
||||||
|
- **Reduction: 60-85%**
|
||||||
|
|
||||||
|
**Daily per sensor:**
|
||||||
|
| Sensor Type | Updates/Day | Before | After | Savings |
|
||||||
|
|------------|-------------|--------|-------|---------|
|
||||||
|
| High-frequency (15min) | 96 | ~290 KB | ~140 KB | 50% |
|
||||||
|
| Low-frequency (6h) | 4 | ~32 KB | ~6 KB | 80% |
|
||||||
|
|
||||||
|
### Most Impactful Exclusions
|
||||||
|
|
||||||
|
1. **`periods` array** (binary_sensor) - Saves 2-5 KB per state
|
||||||
|
2. **`data`** (chart_data_export) - Saves 5-20 KB per state
|
||||||
|
3. **`trend_attributes`** - Saves 1-2 KB per state
|
||||||
|
4. **`description`/`usage_tips`** - Saves 500-1000 bytes per state
|
||||||
|
5. **`icon_color`** - Prevents unnecessary state changes
|
||||||
|
|
||||||
|
### Real-World Impact
|
||||||
|
|
||||||
|
For a typical installation with:
|
||||||
|
- 80+ sensors
|
||||||
|
- Updates every 15 minutes
|
||||||
|
- ~10 sensors updating every minute
|
||||||
|
|
||||||
|
**Before:** ~1.5 GB per month
|
||||||
|
**After:** ~400-500 MB per month
|
||||||
|
**Savings:** ~1 GB per month (~66% reduction)
|
||||||
|
|
||||||
|
## Implementation Files
|
||||||
|
|
||||||
|
- **Sensor Platform**: `custom_components/tibber_prices/sensor/core.py`
|
||||||
|
- Class: `TibberPricesSensor`
|
||||||
|
- 47 attributes excluded
|
||||||
|
|
||||||
|
- **Binary Sensor Platform**: `custom_components/tibber_prices/binary_sensor/core.py`
|
||||||
|
- Class: `TibberPricesBinarySensor`
|
||||||
|
- 30 attributes excluded
|
||||||
|
|
||||||
|
## When to Update _unrecorded_attributes
|
||||||
|
|
||||||
|
### Add to Exclusion List When:
|
||||||
|
|
||||||
|
✅ Adding new **description/help text** attributes
|
||||||
|
✅ Adding **large nested structures** (arrays, complex objects)
|
||||||
|
✅ Adding **frequently changing diagnostic info** (colors, formatted strings)
|
||||||
|
✅ Adding **temporary/time-bound data** (timestamps that become stale)
|
||||||
|
✅ Adding **redundant/derived calculations**
|
||||||
|
|
||||||
|
### Keep in History When:
|
||||||
|
|
||||||
|
✅ **Core price/timing data** needed for analysis
|
||||||
|
✅ **Boolean status flags** that show state transitions
|
||||||
|
✅ **Numeric counters** useful for tracking patterns
|
||||||
|
✅ **Data that helps understand system behavior** over time
|
||||||
|
|
||||||
|
## Decision Framework
|
||||||
|
|
||||||
|
When adding a new attribute, ask:
|
||||||
|
|
||||||
|
1. **Will this be useful in history queries 1 week from now?**
|
||||||
|
- No → Exclude
|
||||||
|
- Yes → Keep
|
||||||
|
|
||||||
|
2. **Can this be calculated from other recorded attributes?**
|
||||||
|
- Yes → Exclude
|
||||||
|
- No → Keep
|
||||||
|
|
||||||
|
3. **Is this primarily for current UI display?**
|
||||||
|
- Yes → Exclude
|
||||||
|
- No → Keep
|
||||||
|
|
||||||
|
4. **Does this change frequently without indicating state change?**
|
||||||
|
- Yes → Exclude
|
||||||
|
- No → Keep
|
||||||
|
|
||||||
|
5. **Is this larger than 100 bytes and not essential for analysis?**
|
||||||
|
- Yes → Exclude
|
||||||
|
- No → Keep
|
||||||
|
|
||||||
|
## Testing
|
||||||
|
|
||||||
|
After modifying `_unrecorded_attributes`:
|
||||||
|
|
||||||
|
1. **Restart Home Assistant** to apply changes
|
||||||
|
2. **Check Recorder database size** before/after
|
||||||
|
3. **Verify essential attributes** still appear in history
|
||||||
|
4. **Confirm excluded attributes** don't appear in new state writes
|
||||||
|
|
||||||
|
**SQL Query to check attribute presence:**
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
state_id,
|
||||||
|
attributes
|
||||||
|
FROM states
|
||||||
|
WHERE entity_id = 'sensor.tibber_home_current_interval_price'
|
||||||
|
ORDER BY last_updated DESC
|
||||||
|
LIMIT 5;
|
||||||
|
```
|
||||||
|
|
||||||
|
## Maintenance Notes
|
||||||
|
|
||||||
|
- ✅ Must be a **class attribute** (instance attributes are ignored)
|
||||||
|
- ✅ Use `frozenset` for immutability
|
||||||
|
- ✅ Only affects **new** state writes (doesn't purge existing history)
|
||||||
|
- ✅ Attributes still available via `entity.attributes` in templates/automations
|
||||||
|
- ✅ Only prevents **storage** in Recorder, not runtime availability
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
- [HA Developer Docs - Excluding State Attributes](https://developers.home-assistant.io/docs/core/entity/#excluding-state-attributes-from-recorder-history)
|
||||||
|
- Implementation PR: [Link when merged]
|
||||||
|
- Related Issue: [Link if applicable]
|
||||||
|
|
@ -0,0 +1,414 @@
|
||||||
|
# Refactoring Guide
|
||||||
|
|
||||||
|
This guide explains how to plan and execute major refactorings in this project.
|
||||||
|
|
||||||
|
## When to Plan a Refactoring
|
||||||
|
|
||||||
|
Not every code change needs a detailed plan. Create a refactoring plan when:
|
||||||
|
|
||||||
|
🔴 **Major changes requiring planning:**
|
||||||
|
|
||||||
|
- Splitting modules into packages (>5 files affected, >500 lines moved)
|
||||||
|
- Architectural changes (new packages, module restructuring)
|
||||||
|
- Breaking changes (API changes, config format migrations)
|
||||||
|
|
||||||
|
🟡 **Medium changes that might benefit from planning:**
|
||||||
|
|
||||||
|
- Complex features with multiple moving parts
|
||||||
|
- Changes affecting many files (>3 files, unclear best approach)
|
||||||
|
- Refactorings with unclear scope
|
||||||
|
|
||||||
|
🟢 **Small changes - no planning needed:**
|
||||||
|
|
||||||
|
- Bug fixes (straightforward, `<`100 lines)
|
||||||
|
- Small features (`<`3 files, clear approach)
|
||||||
|
- Documentation updates
|
||||||
|
- Cosmetic changes (formatting, renaming)
|
||||||
|
|
||||||
|
## The Planning Process
|
||||||
|
|
||||||
|
### 1. Create a Planning Document
|
||||||
|
|
||||||
|
Create a file in the `planning/` directory (git-ignored for free iteration):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Example:
|
||||||
|
touch planning/my-feature-refactoring-plan.md
|
||||||
|
```
|
||||||
|
|
||||||
|
**Note:** The `planning/` directory is git-ignored, so you can iterate freely without polluting git history.
|
||||||
|
|
||||||
|
### 2. Use the Planning Template
|
||||||
|
|
||||||
|
Every planning document should include:
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
# <Feature> Refactoring Plan
|
||||||
|
|
||||||
|
**Status**: 🔄 PLANNING | 🚧 IN PROGRESS | ✅ COMPLETED | ❌ CANCELLED
|
||||||
|
**Created**: YYYY-MM-DD
|
||||||
|
**Last Updated**: YYYY-MM-DD
|
||||||
|
|
||||||
|
## Problem Statement
|
||||||
|
|
||||||
|
- What's the issue?
|
||||||
|
- Why does it need fixing?
|
||||||
|
- Current pain points
|
||||||
|
|
||||||
|
## Proposed Solution
|
||||||
|
|
||||||
|
- High-level approach
|
||||||
|
- File structure (before/after)
|
||||||
|
- Module responsibilities
|
||||||
|
|
||||||
|
## Migration Strategy
|
||||||
|
|
||||||
|
- Phase-by-phase breakdown
|
||||||
|
- File lifecycle (CREATE/MODIFY/DELETE/RENAME)
|
||||||
|
- Dependencies between phases
|
||||||
|
- Testing checkpoints
|
||||||
|
|
||||||
|
## Risks & Mitigation
|
||||||
|
|
||||||
|
- What could go wrong?
|
||||||
|
- How to prevent it?
|
||||||
|
- Rollback strategy
|
||||||
|
|
||||||
|
## Success Criteria
|
||||||
|
|
||||||
|
- Measurable improvements
|
||||||
|
- Testing requirements
|
||||||
|
- Verification steps
|
||||||
|
```
|
||||||
|
|
||||||
|
See `planning/README.md` for detailed template explanation.
|
||||||
|
|
||||||
|
### 3. Iterate Freely
|
||||||
|
|
||||||
|
Since `planning/` is git-ignored:
|
||||||
|
|
||||||
|
- Draft multiple versions
|
||||||
|
- Get AI assistance without commit pressure
|
||||||
|
- Refine until the plan is solid
|
||||||
|
- No need to clean up intermediate versions
|
||||||
|
|
||||||
|
### 4. Implementation Phase
|
||||||
|
|
||||||
|
Once plan is approved:
|
||||||
|
|
||||||
|
- Follow the phases defined in the plan
|
||||||
|
- Test after each phase (don't skip!)
|
||||||
|
- Update plan if issues discovered
|
||||||
|
- Track progress through phase status
|
||||||
|
|
||||||
|
### 5. After Completion
|
||||||
|
|
||||||
|
**Option A: Archive in docs/development/**
|
||||||
|
If the plan has lasting value (successful pattern, reusable approach):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
mv planning/my-feature-refactoring-plan.md docs/development/
|
||||||
|
git add docs/development/my-feature-refactoring-plan.md
|
||||||
|
git commit -m "docs: archive successful refactoring plan"
|
||||||
|
```
|
||||||
|
|
||||||
|
**Option B: Delete**
|
||||||
|
If the plan served its purpose and code is the source of truth:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
rm planning/my-feature-refactoring-plan.md
|
||||||
|
```
|
||||||
|
|
||||||
|
**Option C: Keep locally (not committed)**
|
||||||
|
For "why we didn't do X" reference:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
mkdir -p planning/archive
|
||||||
|
mv planning/my-feature-refactoring-plan.md planning/archive/
|
||||||
|
# Still git-ignored, just organized
|
||||||
|
```
|
||||||
|
|
||||||
|
## Real-World Example
|
||||||
|
|
||||||
|
The **sensor/ package refactoring** (Nov 2025) is a successful example:
|
||||||
|
|
||||||
|
**Before:**
|
||||||
|
|
||||||
|
- `sensor.py` - 2,574 lines, hard to navigate
|
||||||
|
|
||||||
|
**After:**
|
||||||
|
|
||||||
|
- `sensor/` package with 5 focused modules
|
||||||
|
- Each module `<`800 lines
|
||||||
|
- Clear separation of concerns
|
||||||
|
|
||||||
|
**Process:**
|
||||||
|
|
||||||
|
1. Created `planning/module-splitting-plan.md` (now in `docs/development/`)
|
||||||
|
2. Defined 6 phases with clear file lifecycle
|
||||||
|
3. Implemented phase by phase
|
||||||
|
4. Tested after each phase
|
||||||
|
5. Documented in AGENTS.md
|
||||||
|
6. Moved plan to `docs/development/` as reference
|
||||||
|
|
||||||
|
**Key learnings:**
|
||||||
|
|
||||||
|
- Temporary `_impl.py` files avoid Python package conflicts
|
||||||
|
- Test after EVERY phase (don't accumulate changes)
|
||||||
|
- Clear file lifecycle (CREATE/MODIFY/DELETE/RENAME)
|
||||||
|
- Phase-by-phase approach enables safe rollback
|
||||||
|
|
||||||
|
**Note:** The complete module splitting plan was documented during implementation but has been superseded by the actual code structure.
|
||||||
|
|
||||||
|
## Phase-by-Phase Implementation
|
||||||
|
|
||||||
|
### Why Phases Matter
|
||||||
|
|
||||||
|
Breaking refactorings into phases:
|
||||||
|
|
||||||
|
- ✅ Enables testing after each change (catch bugs early)
|
||||||
|
- ✅ Allows rollback to last good state
|
||||||
|
- ✅ Makes progress visible
|
||||||
|
- ✅ Reduces cognitive load (focus on one thing)
|
||||||
|
- ❌ Takes more time (but worth it!)
|
||||||
|
|
||||||
|
### Phase Structure
|
||||||
|
|
||||||
|
Each phase should:
|
||||||
|
|
||||||
|
1. **Have clear goal** - What's being changed?
|
||||||
|
2. **Document file lifecycle** - CREATE/MODIFY/DELETE/RENAME
|
||||||
|
3. **Define success criteria** - How to verify it worked?
|
||||||
|
4. **Include testing steps** - What to test?
|
||||||
|
5. **Estimate time** - Realistic time budget
|
||||||
|
|
||||||
|
### Example Phase Documentation
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
### Phase 3: Extract Helper Functions (Session 3)
|
||||||
|
|
||||||
|
**Goal**: Move pure utility functions to helpers.py
|
||||||
|
|
||||||
|
**File Lifecycle**:
|
||||||
|
|
||||||
|
- ✨ CREATE `sensor/helpers.py` (utility functions)
|
||||||
|
- ✏️ MODIFY `sensor/core.py` (import from helpers.py)
|
||||||
|
|
||||||
|
**Steps**:
|
||||||
|
|
||||||
|
1. Create sensor/helpers.py
|
||||||
|
2. Move pure functions (no state, no self)
|
||||||
|
3. Add comprehensive docstrings
|
||||||
|
4. Update imports in core.py
|
||||||
|
|
||||||
|
**Estimated time**: 45 minutes
|
||||||
|
|
||||||
|
**Success criteria**:
|
||||||
|
|
||||||
|
- ✅ All pure functions moved
|
||||||
|
- ✅ `./scripts/lint-check` passes
|
||||||
|
- ✅ HA starts successfully
|
||||||
|
- ✅ All entities work correctly
|
||||||
|
```
|
||||||
|
|
||||||
|
## Testing Strategy
|
||||||
|
|
||||||
|
### After Each Phase
|
||||||
|
|
||||||
|
Minimum testing checklist:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Linting passes
|
||||||
|
./scripts/lint-check
|
||||||
|
|
||||||
|
# 2. Home Assistant starts
|
||||||
|
./scripts/develop
|
||||||
|
# Watch for startup errors in logs
|
||||||
|
|
||||||
|
# 3. Integration loads
|
||||||
|
# Check: Settings → Devices & Services → Tibber Prices
|
||||||
|
# Verify: All entities appear
|
||||||
|
|
||||||
|
# 4. Basic functionality
|
||||||
|
# Test: Data updates without errors
|
||||||
|
# Check: Entity states update correctly
|
||||||
|
```
|
||||||
|
|
||||||
|
### Comprehensive Testing (Final Phase)
|
||||||
|
|
||||||
|
After completing all phases:
|
||||||
|
|
||||||
|
- Test all entities (sensors, binary sensors)
|
||||||
|
- Test configuration flow (add/modify/remove)
|
||||||
|
- Test options flow (change settings)
|
||||||
|
- Test services (custom service calls)
|
||||||
|
- Test error handling (disconnect API, invalid data)
|
||||||
|
- Test caching (restart HA, verify cache loads)
|
||||||
|
- Test time-based updates (quarter-hour refresh)
|
||||||
|
|
||||||
|
## Common Pitfalls
|
||||||
|
|
||||||
|
### ❌ Skip Planning for Large Changes
|
||||||
|
|
||||||
|
**Problem:** "This seems straightforward, I'll just start coding..."
|
||||||
|
|
||||||
|
**Result:** Halfway through, realize the approach doesn't work. Wasted time.
|
||||||
|
|
||||||
|
**Solution:** If unsure, spend 30 minutes on a rough plan. Better to plan and discard than get stuck.
|
||||||
|
|
||||||
|
### ❌ Implement All Phases at Once
|
||||||
|
|
||||||
|
**Problem:** "I'll do all phases, then test everything..."
|
||||||
|
|
||||||
|
**Result:** 10+ files changed, 2000+ lines modified, hard to debug if something breaks.
|
||||||
|
|
||||||
|
**Solution:** Test after EVERY phase. Commit after each successful phase.
|
||||||
|
|
||||||
|
### ❌ Forget to Update Documentation
|
||||||
|
|
||||||
|
**Problem:** Code is refactored, but AGENTS.md and docs/ still reference old structure.
|
||||||
|
|
||||||
|
**Result:** AI/humans get confused by outdated documentation.
|
||||||
|
|
||||||
|
**Solution:** Include "Documentation Phase" at the end of every refactoring plan.
|
||||||
|
|
||||||
|
### ❌ Ignore the Planning Directory
|
||||||
|
|
||||||
|
**Problem:** "I'll just create the plan in docs/ directly..."
|
||||||
|
|
||||||
|
**Result:** Git history polluted with draft iterations, or pressure to "commit something" too early.
|
||||||
|
|
||||||
|
**Solution:** Always use `planning/` for work-in-progress. Move to `docs/` only when done.
|
||||||
|
|
||||||
|
## Integration with AI Development
|
||||||
|
|
||||||
|
This project uses AI heavily (GitHub Copilot, Claude). The planning process supports AI development:
|
||||||
|
|
||||||
|
**AI reads from:**
|
||||||
|
|
||||||
|
- `AGENTS.md` - Long-term memory, patterns, conventions (AI-focused)
|
||||||
|
- `docs/development/` - Human-readable guides (human-focused)
|
||||||
|
- `planning/` - Active refactoring plans (shared context)
|
||||||
|
|
||||||
|
**AI updates:**
|
||||||
|
|
||||||
|
- `AGENTS.md` - When patterns change
|
||||||
|
- `planning/*.md` - During refactoring implementation
|
||||||
|
- `docs/development/` - After successful completion
|
||||||
|
|
||||||
|
**Why separate AGENTS.md and docs/development/?**
|
||||||
|
|
||||||
|
- `AGENTS.md`: Technical, comprehensive, AI-optimized
|
||||||
|
- `docs/development/`: Practical, focused, human-optimized
|
||||||
|
- Both stay in sync but serve different audiences
|
||||||
|
|
||||||
|
See [AGENTS.md](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.23.1/AGENTS.md) section "Planning Major Refactorings" for AI-specific guidance.
|
||||||
|
|
||||||
|
## Tools and Resources
|
||||||
|
|
||||||
|
### Planning Directory
|
||||||
|
|
||||||
|
- `planning/` - Git-ignored workspace for drafts
|
||||||
|
- `planning/README.md` - Detailed planning documentation
|
||||||
|
- `planning/*.md` - Active refactoring plans
|
||||||
|
|
||||||
|
### Example Plans
|
||||||
|
|
||||||
|
- `docs/development/module-splitting-plan.md` - ✅ Completed, archived
|
||||||
|
- `planning/config-flow-refactoring-plan.md` - 🔄 Planned (1013 lines → 4 modules)
|
||||||
|
- `planning/binary-sensor-refactoring-plan.md` - 🔄 Planned (644 lines → 4 modules)
|
||||||
|
- `planning/coordinator-refactoring-plan.md` - 🔄 Planned (1446 lines, high complexity)
|
||||||
|
|
||||||
|
### Helper Scripts
|
||||||
|
|
||||||
|
```bash
|
||||||
|
./scripts/lint-check # Verify code quality
|
||||||
|
./scripts/develop # Start HA for testing
|
||||||
|
./scripts/lint # Auto-fix issues
|
||||||
|
```
|
||||||
|
|
||||||
|
## FAQ
|
||||||
|
|
||||||
|
### Q: When should I create a plan vs. just start coding?
|
||||||
|
|
||||||
|
**A:** If you're asking this question, you probably need a plan. 😊
|
||||||
|
|
||||||
|
Simple rule: If you can't describe the entire change in 3 sentences, create a plan.
|
||||||
|
|
||||||
|
### Q: How detailed should the plan be?
|
||||||
|
|
||||||
|
**A:** Detailed enough to execute without major surprises, but not a line-by-line script.
|
||||||
|
|
||||||
|
Good plan level:
|
||||||
|
|
||||||
|
- Lists all files affected (CREATE/MODIFY/DELETE)
|
||||||
|
- Defines phases with clear boundaries
|
||||||
|
- Includes testing strategy
|
||||||
|
- Estimates time per phase
|
||||||
|
|
||||||
|
Too detailed:
|
||||||
|
|
||||||
|
- Exact code snippets for every change
|
||||||
|
- Line-by-line instructions
|
||||||
|
|
||||||
|
Too vague:
|
||||||
|
|
||||||
|
- "Refactor sensor.py to be better"
|
||||||
|
- No phase breakdown
|
||||||
|
- No testing strategy
|
||||||
|
|
||||||
|
### Q: What if the plan changes during implementation?
|
||||||
|
|
||||||
|
**A:** Update the plan! Planning documents are living documents.
|
||||||
|
|
||||||
|
If you discover:
|
||||||
|
|
||||||
|
- Better approach → Update "Proposed Solution"
|
||||||
|
- More phases needed → Add to "Migration Strategy"
|
||||||
|
- New risks → Update "Risks & Mitigation"
|
||||||
|
|
||||||
|
Document WHY the plan changed (helps future refactorings).
|
||||||
|
|
||||||
|
### Q: Should every refactoring follow this process?
|
||||||
|
|
||||||
|
**A:** No! Use judgment:
|
||||||
|
|
||||||
|
- **Small changes (`<`100 lines, clear approach)**: Just do it, no plan needed
|
||||||
|
- **Medium changes (unclear scope)**: Write rough outline, refine if needed
|
||||||
|
- **Large changes (>500 lines, >5 files)**: Full planning process
|
||||||
|
|
||||||
|
### Q: How do I know when a refactoring is successful?
|
||||||
|
|
||||||
|
**A:** Check the "Success Criteria" from your plan:
|
||||||
|
|
||||||
|
Typical criteria:
|
||||||
|
|
||||||
|
- ✅ All linting checks pass
|
||||||
|
- ✅ HA starts without errors
|
||||||
|
- ✅ All entities functional
|
||||||
|
- ✅ No regressions (existing features work)
|
||||||
|
- ✅ Code easier to understand/modify
|
||||||
|
- ✅ Documentation updated
|
||||||
|
|
||||||
|
If you can't tick all boxes, the refactoring isn't done.
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
**Key takeaways:**
|
||||||
|
|
||||||
|
1. **Plan when scope is unclear** (>500 lines, >5 files, breaking changes)
|
||||||
|
2. **Use planning/ directory** for free iteration (git-ignored)
|
||||||
|
3. **Work in phases** and test after each phase
|
||||||
|
4. **Document file lifecycle** (CREATE/MODIFY/DELETE/RENAME)
|
||||||
|
5. **Update documentation** after completion (AGENTS.md, docs/)
|
||||||
|
6. **Archive or delete** plan after implementation
|
||||||
|
|
||||||
|
**Remember:** Good planning prevents half-finished refactorings and makes rollback easier when things go wrong.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Next steps:**
|
||||||
|
|
||||||
|
- Read `planning/README.md` for detailed template
|
||||||
|
- Check `docs/development/module-splitting-plan.md` for real example
|
||||||
|
- Browse `planning/` for active refactoring plans
|
||||||
|
|
@ -0,0 +1,365 @@
|
||||||
|
---
|
||||||
|
comments: false
|
||||||
|
---
|
||||||
|
|
||||||
|
# Release Notes Generation
|
||||||
|
|
||||||
|
This project supports **three ways** to generate release notes from conventional commits, plus **automatic version management**.
|
||||||
|
|
||||||
|
## 🚀 Quick Start: Preparing a Release
|
||||||
|
|
||||||
|
**Recommended workflow (automatic & foolproof):**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Use the helper script to prepare release
|
||||||
|
./scripts/release/prepare 0.3.0
|
||||||
|
|
||||||
|
# This will:
|
||||||
|
# - Update manifest.json version to 0.3.0
|
||||||
|
# - Create commit: "chore(release): bump version to 0.3.0"
|
||||||
|
# - Create tag: v0.3.0
|
||||||
|
# - Show you what will be pushed
|
||||||
|
|
||||||
|
# 2. Review and push when ready
|
||||||
|
git push origin main v0.3.0
|
||||||
|
|
||||||
|
# 3. CI/CD automatically:
|
||||||
|
# - Detects the new tag
|
||||||
|
# - Generates release notes (excluding version bump commit)
|
||||||
|
# - Creates GitHub release
|
||||||
|
```
|
||||||
|
|
||||||
|
**If you forget to bump manifest.json:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Just edit manifest.json manually and commit
|
||||||
|
vim custom_components/tibber_prices/manifest.json # "version": "0.3.0"
|
||||||
|
git commit -am "chore(release): bump version to 0.3.0"
|
||||||
|
git push
|
||||||
|
|
||||||
|
# Auto-Tag workflow detects manifest.json change and creates tag automatically!
|
||||||
|
# Then Release workflow kicks in and creates the GitHub release
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 📋 Release Options
|
||||||
|
|
||||||
|
### 1. GitHub UI Button (Easiest)
|
||||||
|
|
||||||
|
Use GitHub's built-in release notes generator:
|
||||||
|
|
||||||
|
1. Go to [Releases](https://github.com/jpawlowski/hass.tibber_prices/releases)
|
||||||
|
2. Click "Draft a new release"
|
||||||
|
3. Select your tag
|
||||||
|
4. Click "Generate release notes" button
|
||||||
|
5. Edit if needed and publish
|
||||||
|
|
||||||
|
**Uses:** `.github/release.yml` configuration
|
||||||
|
**Best for:** Quick releases, works with PRs that have labels
|
||||||
|
**Note:** Direct commits appear in "Other Changes" category
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 2. Local Script (Intelligent)
|
||||||
|
|
||||||
|
Run `./scripts/release/generate-notes` to parse conventional commits locally.
|
||||||
|
|
||||||
|
**Automatic backend detection:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate from latest tag to HEAD
|
||||||
|
./scripts/release/generate-notes
|
||||||
|
|
||||||
|
# Generate between specific tags
|
||||||
|
./scripts/release/generate-notes v1.0.0 v1.1.0
|
||||||
|
|
||||||
|
# Generate from tag to HEAD
|
||||||
|
./scripts/release/generate-notes v1.0.0 HEAD
|
||||||
|
```
|
||||||
|
|
||||||
|
**Force specific backend:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Use AI (GitHub Copilot CLI)
|
||||||
|
RELEASE_NOTES_BACKEND=copilot ./scripts/release/generate-notes
|
||||||
|
|
||||||
|
# Use git-cliff (template-based)
|
||||||
|
RELEASE_NOTES_BACKEND=git-cliff ./scripts/release/generate-notes
|
||||||
|
|
||||||
|
# Use manual parsing (grep/awk fallback)
|
||||||
|
RELEASE_NOTES_BACKEND=manual ./scripts/release/generate-notes
|
||||||
|
```
|
||||||
|
|
||||||
|
**Disable AI** (useful for CI/CD):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
USE_AI=false ./scripts/release/generate-notes
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Backend Priority
|
||||||
|
|
||||||
|
The script automatically selects the best available backend:
|
||||||
|
|
||||||
|
1. **GitHub Copilot CLI** - AI-powered, context-aware (best quality)
|
||||||
|
2. **git-cliff** - Fast Rust tool with templates (reliable)
|
||||||
|
3. **Manual** - Simple grep/awk parsing (always works)
|
||||||
|
|
||||||
|
In CI/CD (`$CI` or `$GITHUB_ACTIONS`), AI is automatically disabled.
|
||||||
|
|
||||||
|
#### Installing Optional Backends
|
||||||
|
|
||||||
|
**In DevContainer (automatic):**
|
||||||
|
|
||||||
|
git-cliff is automatically installed when the DevContainer is built:
|
||||||
|
- **Rust toolchain**: Installed via `ghcr.io/devcontainers/features/rust:1` (minimal profile)
|
||||||
|
- **git-cliff**: Installed via cargo in `scripts/setup/setup`
|
||||||
|
|
||||||
|
Simply rebuild the container (VS Code: "Dev Containers: Rebuild Container") and git-cliff will be available.
|
||||||
|
|
||||||
|
**Manual installation (outside DevContainer):**
|
||||||
|
|
||||||
|
**git-cliff** (template-based):
|
||||||
|
```bash
|
||||||
|
# See: https://git-cliff.org/docs/installation
|
||||||
|
|
||||||
|
# macOS
|
||||||
|
brew install git-cliff
|
||||||
|
|
||||||
|
# Cargo (all platforms)
|
||||||
|
cargo install git-cliff
|
||||||
|
|
||||||
|
# Manual binary download
|
||||||
|
wget https://github.com/orhun/git-cliff/releases/latest/download/git-cliff-x86_64-unknown-linux-gnu.tar.gz
|
||||||
|
tar -xzf git-cliff-*.tar.gz
|
||||||
|
sudo mv git-cliff-*/git-cliff /usr/local/bin/
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 3. CI/CD Automation
|
||||||
|
|
||||||
|
Automatic release notes on tag push.
|
||||||
|
|
||||||
|
**Workflow:** `.github/workflows/release.yml`
|
||||||
|
|
||||||
|
**Triggers:** Version tags (`v1.0.0`, `v2.1.3`, etc.)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Create and push a tag to trigger automatic release
|
||||||
|
git tag v1.0.0
|
||||||
|
git push origin v1.0.0
|
||||||
|
|
||||||
|
# GitHub Actions will:
|
||||||
|
# 1. Detect the new tag
|
||||||
|
# 2. Generate release notes using git-cliff
|
||||||
|
# 3. Create a GitHub release automatically
|
||||||
|
```
|
||||||
|
|
||||||
|
**Backend:** Uses `git-cliff` (AI disabled in CI for reliability)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 📝 Output Format
|
||||||
|
|
||||||
|
All methods produce GitHub-flavored Markdown with emoji categories:
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
## 🎉 New Features
|
||||||
|
|
||||||
|
- **scope**: Description ([abc1234](link-to-commit))
|
||||||
|
|
||||||
|
## 🐛 Bug Fixes
|
||||||
|
|
||||||
|
- **scope**: Description ([def5678](link-to-commit))
|
||||||
|
|
||||||
|
## 📚 Documentation
|
||||||
|
|
||||||
|
- **scope**: Description ([ghi9012](link-to-commit))
|
||||||
|
|
||||||
|
## 🔧 Maintenance & Refactoring
|
||||||
|
|
||||||
|
- **scope**: Description ([jkl3456](link-to-commit))
|
||||||
|
|
||||||
|
## 🧪 Testing
|
||||||
|
|
||||||
|
- **scope**: Description ([mno7890](link-to-commit))
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🎯 When to Use Which
|
||||||
|
|
||||||
|
| Method | Use Case | Pros | Cons |
|
||||||
|
|--------|----------|------|------|
|
||||||
|
| **Helper Script** | Normal releases | Foolproof, automatic | Requires script |
|
||||||
|
| **Auto-Tag Workflow** | Forgot script | Safety net, automatic tagging | Still need manifest bump |
|
||||||
|
| **GitHub Button** | Manual quick release | Easy, no script | Limited categorization |
|
||||||
|
| **Local Script** | Testing release notes | Preview before release | Manual process |
|
||||||
|
| **CI/CD** | After tag push | Fully automatic | Needs tag first |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🔄 Complete Release Workflows
|
||||||
|
|
||||||
|
### Workflow A: Using Helper Script (Recommended)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Step 1: Prepare release (all-in-one)
|
||||||
|
./scripts/release/prepare 0.3.0
|
||||||
|
|
||||||
|
# Step 2: Review changes
|
||||||
|
git log -1 --stat
|
||||||
|
git show v0.3.0
|
||||||
|
|
||||||
|
# Step 3: Push when ready
|
||||||
|
git push origin main v0.3.0
|
||||||
|
|
||||||
|
# Done! CI/CD creates the release automatically
|
||||||
|
```
|
||||||
|
|
||||||
|
**What happens:**
|
||||||
|
1. Script bumps manifest.json → commits → creates tag locally
|
||||||
|
2. You push commit + tag together
|
||||||
|
3. Release workflow sees tag → generates notes → creates release
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Workflow B: Manual (with Auto-Tag Safety Net)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Step 1: Bump version manually
|
||||||
|
vim custom_components/tibber_prices/manifest.json
|
||||||
|
# Change: "version": "0.3.0"
|
||||||
|
|
||||||
|
# Step 2: Commit
|
||||||
|
git commit -am "chore(release): bump version to 0.3.0"
|
||||||
|
git push
|
||||||
|
|
||||||
|
# Step 3: Wait for Auto-Tag workflow
|
||||||
|
# GitHub Actions automatically creates v0.3.0 tag
|
||||||
|
# Then Release workflow creates the release
|
||||||
|
```
|
||||||
|
|
||||||
|
**What happens:**
|
||||||
|
1. You push manifest.json change
|
||||||
|
2. Auto-Tag workflow detects change → creates tag automatically
|
||||||
|
3. Release workflow sees new tag → creates release
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Workflow C: Manual Tag (Old Way)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Step 1: Bump version
|
||||||
|
vim custom_components/tibber_prices/manifest.json
|
||||||
|
git commit -am "chore(release): bump version to 0.3.0"
|
||||||
|
|
||||||
|
# Step 2: Create tag manually
|
||||||
|
git tag v0.3.0
|
||||||
|
git push origin main v0.3.0
|
||||||
|
|
||||||
|
# Release workflow creates release
|
||||||
|
```
|
||||||
|
|
||||||
|
**What happens:**
|
||||||
|
1. You create and push tag manually
|
||||||
|
2. Release workflow creates release
|
||||||
|
3. Auto-Tag workflow skips (tag already exists)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## ⚙️ Configuration Files
|
||||||
|
|
||||||
|
- `scripts/release/prepare` - Helper script to bump version + create tag
|
||||||
|
- `.github/workflows/auto-tag.yml` - Automatic tag creation on manifest.json change
|
||||||
|
- `.github/workflows/release.yml` - Automatic release on tag push
|
||||||
|
- `.github/release.yml` - GitHub UI button configuration
|
||||||
|
- `cliff.toml` - git-cliff template (filters out version bumps)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🛡️ Safety Features
|
||||||
|
|
||||||
|
### 1. **Version Validation**
|
||||||
|
Both helper script and auto-tag workflow validate version format (X.Y.Z).
|
||||||
|
|
||||||
|
### 2. **No Duplicate Tags**
|
||||||
|
- Helper script checks if tag exists (local + remote)
|
||||||
|
- Auto-tag workflow checks if tag exists before creating
|
||||||
|
|
||||||
|
### 3. **Atomic Operations**
|
||||||
|
Helper script creates commit + tag locally. You decide when to push.
|
||||||
|
|
||||||
|
### 4. **Version Bumps Filtered**
|
||||||
|
Release notes automatically exclude `chore(release): bump version` commits.
|
||||||
|
|
||||||
|
### 5. **Rollback Instructions**
|
||||||
|
Helper script shows how to undo if you change your mind.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🐛 Troubleshooting
|
||||||
|
|
||||||
|
**"Tag already exists" error:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Local tag
|
||||||
|
git tag -d v0.3.0
|
||||||
|
|
||||||
|
# Remote tag (only if you need to recreate)
|
||||||
|
git push origin :refs/tags/v0.3.0
|
||||||
|
```
|
||||||
|
|
||||||
|
**Manifest version doesn't match tag:**
|
||||||
|
|
||||||
|
This shouldn't happen with the new workflows, but if it does:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Fix manifest.json
|
||||||
|
vim custom_components/tibber_prices/manifest.json
|
||||||
|
|
||||||
|
# 2. Amend the commit
|
||||||
|
git commit --amend -am "chore(release): bump version to 0.3.0"
|
||||||
|
|
||||||
|
# 3. Move the tag
|
||||||
|
git tag -f v0.3.0
|
||||||
|
git push -f origin main v0.3.0
|
||||||
|
```
|
||||||
|
|
||||||
|
**Auto-tag didn't create tag:**
|
||||||
|
|
||||||
|
Check workflow runs in GitHub Actions. Common causes:
|
||||||
|
- Tag already exists remotely
|
||||||
|
- Invalid version format in manifest.json
|
||||||
|
- manifest.json not in the commit that was pushed
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 🔍 Format Requirements
|
||||||
|
|
||||||
|
**HACS:** No specific format required, uses GitHub releases as-is
|
||||||
|
**Home Assistant:** No specific format required for custom integrations
|
||||||
|
**Markdown:** Standard GitHub-flavored Markdown supported
|
||||||
|
**HTML:** Can include `<ha-alert>` tags if needed
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 💡 Tips
|
||||||
|
|
||||||
|
1. **Conventional Commits:** Use proper commit format for best results:
|
||||||
|
```
|
||||||
|
feat(scope): Add new feature
|
||||||
|
|
||||||
|
Detailed description of what changed.
|
||||||
|
|
||||||
|
Impact: Users can now do X and Y.
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Impact Section:** Add `Impact:` in commit body for user-friendly descriptions
|
||||||
|
|
||||||
|
3. **Test Locally:** Run `./scripts/release/generate-notes` before creating release
|
||||||
|
|
||||||
|
4. **AI vs Template:** GitHub Copilot CLI provides better descriptions, git-cliff is faster and more reliable
|
||||||
|
|
||||||
|
5. **CI/CD:** Tag push triggers automatic release - no manual intervention needed
|
||||||
330
docs/developer/versioned_docs/version-v0.23.1/repairs-system.md
Normal file
330
docs/developer/versioned_docs/version-v0.23.1/repairs-system.md
Normal file
|
|
@ -0,0 +1,330 @@
|
||||||
|
# Repairs System
|
||||||
|
|
||||||
|
The Tibber Prices integration includes a proactive repair notification system that alerts users to important issues requiring attention. This system leverages Home Assistant's built-in `issue_registry` to create user-facing notifications in the UI.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The repairs system is implemented in `coordinator/repairs.py` via the `TibberPricesRepairManager` class, which is instantiated in the coordinator and integrated into the update cycle.
|
||||||
|
|
||||||
|
**Design Principles:**
|
||||||
|
- **Proactive**: Detect issues before they become critical
|
||||||
|
- **User-friendly**: Clear explanations with actionable guidance
|
||||||
|
- **Auto-clearing**: Repairs automatically disappear when conditions resolve
|
||||||
|
- **Non-blocking**: Integration continues to work even with active repairs
|
||||||
|
|
||||||
|
## Implemented Repair Types
|
||||||
|
|
||||||
|
### 1. Tomorrow Data Missing
|
||||||
|
|
||||||
|
**Issue ID:** `tomorrow_data_missing_{entry_id}`
|
||||||
|
|
||||||
|
**When triggered:**
|
||||||
|
- Current time is after 18:00 (configurable via `TOMORROW_DATA_WARNING_HOUR`)
|
||||||
|
- Tomorrow's electricity price data is still not available
|
||||||
|
|
||||||
|
**When cleared:**
|
||||||
|
- Tomorrow's data becomes available
|
||||||
|
- Automatically checks on every successful API update
|
||||||
|
|
||||||
|
**User impact:**
|
||||||
|
Users cannot plan ahead for tomorrow's electricity usage optimization. Automations relying on tomorrow's prices will not work.
|
||||||
|
|
||||||
|
**Implementation:**
|
||||||
|
```python
|
||||||
|
# In coordinator update cycle
|
||||||
|
has_tomorrow_data = self._data_fetcher.has_tomorrow_data(result["priceInfo"])
|
||||||
|
await self._repair_manager.check_tomorrow_data_availability(
|
||||||
|
has_tomorrow_data=has_tomorrow_data,
|
||||||
|
current_time=current_time,
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Translation placeholders:**
|
||||||
|
- `home_name`: Name of the affected home
|
||||||
|
- `warning_hour`: Hour after which warning appears (default: 18)
|
||||||
|
|
||||||
|
### 2. Rate Limit Exceeded
|
||||||
|
|
||||||
|
**Issue ID:** `rate_limit_exceeded_{entry_id}`
|
||||||
|
|
||||||
|
**When triggered:**
|
||||||
|
- Integration encounters 3 or more consecutive rate limit errors (HTTP 429)
|
||||||
|
- Threshold configurable via `RATE_LIMIT_WARNING_THRESHOLD`
|
||||||
|
|
||||||
|
**When cleared:**
|
||||||
|
- Successful API call completes (no rate limit error)
|
||||||
|
- Error counter resets to 0
|
||||||
|
|
||||||
|
**User impact:**
|
||||||
|
API requests are being throttled, causing stale data. Updates may be delayed until rate limit expires.
|
||||||
|
|
||||||
|
**Implementation:**
|
||||||
|
```python
|
||||||
|
# In error handler
|
||||||
|
is_rate_limit = (
|
||||||
|
"429" in error_str
|
||||||
|
or "rate limit" in error_str
|
||||||
|
or "too many requests" in error_str
|
||||||
|
)
|
||||||
|
if is_rate_limit:
|
||||||
|
await self._repair_manager.track_rate_limit_error()
|
||||||
|
|
||||||
|
# On successful update
|
||||||
|
await self._repair_manager.clear_rate_limit_tracking()
|
||||||
|
```
|
||||||
|
|
||||||
|
**Translation placeholders:**
|
||||||
|
- `home_name`: Name of the affected home
|
||||||
|
- `error_count`: Number of consecutive rate limit errors
|
||||||
|
|
||||||
|
### 3. Home Not Found
|
||||||
|
|
||||||
|
**Issue ID:** `home_not_found_{entry_id}`
|
||||||
|
|
||||||
|
**When triggered:**
|
||||||
|
- Home configured in this integration is no longer present in Tibber account
|
||||||
|
- Detected during user data refresh (daily check)
|
||||||
|
|
||||||
|
**When cleared:**
|
||||||
|
- Home reappears in Tibber account (unlikely - manual cleanup expected)
|
||||||
|
- Integration entry is removed (shutdown cleanup)
|
||||||
|
|
||||||
|
**User impact:**
|
||||||
|
Integration cannot fetch data for a non-existent home. User must remove the config entry and re-add if needed.
|
||||||
|
|
||||||
|
**Implementation:**
|
||||||
|
```python
|
||||||
|
# After user data update
|
||||||
|
home_exists = self._data_fetcher._check_home_exists(home_id)
|
||||||
|
if not home_exists:
|
||||||
|
await self._repair_manager.create_home_not_found_repair()
|
||||||
|
else:
|
||||||
|
await self._repair_manager.clear_home_not_found_repair()
|
||||||
|
```
|
||||||
|
|
||||||
|
**Translation placeholders:**
|
||||||
|
- `home_name`: Name of the missing home
|
||||||
|
- `entry_id`: Config entry ID for reference
|
||||||
|
|
||||||
|
## Configuration Constants
|
||||||
|
|
||||||
|
Defined in `coordinator/constants.py`:
|
||||||
|
|
||||||
|
```python
|
||||||
|
TOMORROW_DATA_WARNING_HOUR = 18 # Hour after which to warn about missing tomorrow data
|
||||||
|
RATE_LIMIT_WARNING_THRESHOLD = 3 # Number of consecutive errors before creating repair
|
||||||
|
```
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
### Class Structure
|
||||||
|
|
||||||
|
```python
|
||||||
|
class TibberPricesRepairManager:
|
||||||
|
"""Manages repair issues for a single Tibber home."""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
hass: HomeAssistant,
|
||||||
|
entry_id: str,
|
||||||
|
home_name: str,
|
||||||
|
) -> None:
|
||||||
|
"""Initialize repair manager."""
|
||||||
|
self._hass = hass
|
||||||
|
self._entry_id = entry_id
|
||||||
|
self._home_name = home_name
|
||||||
|
|
||||||
|
# State tracking
|
||||||
|
self._tomorrow_data_repair_active = False
|
||||||
|
self._rate_limit_error_count = 0
|
||||||
|
self._rate_limit_repair_active = False
|
||||||
|
self._home_not_found_repair_active = False
|
||||||
|
```
|
||||||
|
|
||||||
|
### State Tracking
|
||||||
|
|
||||||
|
Each repair type maintains internal state to avoid redundant operations:
|
||||||
|
|
||||||
|
- **`_tomorrow_data_repair_active`**: Boolean flag, prevents creating duplicate repairs
|
||||||
|
- **`_rate_limit_error_count`**: Integer counter, tracks consecutive errors
|
||||||
|
- **`_rate_limit_repair_active`**: Boolean flag, tracks repair status
|
||||||
|
- **`_home_not_found_repair_active`**: Boolean flag, one-time repair (manual cleanup)
|
||||||
|
|
||||||
|
### Lifecycle Integration
|
||||||
|
|
||||||
|
**Coordinator Initialization:**
|
||||||
|
```python
|
||||||
|
self._repair_manager = TibberPricesRepairManager(
|
||||||
|
hass=hass,
|
||||||
|
entry_id=self.config_entry.entry_id,
|
||||||
|
home_name=self._home_name,
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Update Cycle Integration:**
|
||||||
|
```python
|
||||||
|
# Success path - check conditions
|
||||||
|
if result and "priceInfo" in result:
|
||||||
|
has_tomorrow_data = self._data_fetcher.has_tomorrow_data(result["priceInfo"])
|
||||||
|
await self._repair_manager.check_tomorrow_data_availability(
|
||||||
|
has_tomorrow_data=has_tomorrow_data,
|
||||||
|
current_time=current_time,
|
||||||
|
)
|
||||||
|
await self._repair_manager.clear_rate_limit_tracking()
|
||||||
|
|
||||||
|
# Error path - track rate limits
|
||||||
|
if is_rate_limit:
|
||||||
|
await self._repair_manager.track_rate_limit_error()
|
||||||
|
```
|
||||||
|
|
||||||
|
**Shutdown Cleanup:**
|
||||||
|
```python
|
||||||
|
async def async_shutdown(self) -> None:
|
||||||
|
"""Shut down coordinator and clean up."""
|
||||||
|
await self._repair_manager.clear_all_repairs()
|
||||||
|
# ... other cleanup ...
|
||||||
|
```
|
||||||
|
|
||||||
|
## Translation System
|
||||||
|
|
||||||
|
Repairs use Home Assistant's standard translation system. Translations are defined in:
|
||||||
|
|
||||||
|
- `/translations/en.json`
|
||||||
|
- `/translations/de.json`
|
||||||
|
- `/translations/nb.json`
|
||||||
|
- `/translations/nl.json`
|
||||||
|
- `/translations/sv.json`
|
||||||
|
|
||||||
|
**Structure:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"issues": {
|
||||||
|
"tomorrow_data_missing": {
|
||||||
|
"title": "Tomorrow's price data missing for {home_name}",
|
||||||
|
"description": "Detailed explanation with multiple paragraphs...\n\nPossible causes:\n- Cause 1\n- Cause 2"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Home Assistant Integration
|
||||||
|
|
||||||
|
Repairs appear in:
|
||||||
|
- **Settings → System → Repairs** (main repairs panel)
|
||||||
|
- **Notifications** (bell icon in UI shows repair count)
|
||||||
|
|
||||||
|
Repair properties:
|
||||||
|
- **`is_fixable=False`**: No automated fix available (user action required)
|
||||||
|
- **`severity=IssueSeverity.WARNING`**: Yellow warning level (not critical)
|
||||||
|
- **`translation_key`**: References `issues.{key}` in translation files
|
||||||
|
|
||||||
|
## Testing Repairs
|
||||||
|
|
||||||
|
### Tomorrow Data Missing
|
||||||
|
|
||||||
|
1. Wait until after 18:00 local time
|
||||||
|
2. Ensure integration has no tomorrow price data
|
||||||
|
3. Repair should appear in UI
|
||||||
|
4. When tomorrow data arrives (next API fetch), repair clears
|
||||||
|
|
||||||
|
**Manual trigger:**
|
||||||
|
```python
|
||||||
|
# Temporarily set warning hour to current hour for testing
|
||||||
|
TOMORROW_DATA_WARNING_HOUR = datetime.now().hour
|
||||||
|
```
|
||||||
|
|
||||||
|
### Rate Limit Exceeded
|
||||||
|
|
||||||
|
1. Simulate 3+ consecutive rate limit errors
|
||||||
|
2. Repair should appear after 3rd error
|
||||||
|
3. Successful API call clears the repair
|
||||||
|
|
||||||
|
**Manual test:**
|
||||||
|
- Reduce API polling interval to trigger rate limiting
|
||||||
|
- Or temporarily return HTTP 429 in API client
|
||||||
|
|
||||||
|
### Home Not Found
|
||||||
|
|
||||||
|
1. Remove home from Tibber account via app/web
|
||||||
|
2. Wait for user data refresh (daily check)
|
||||||
|
3. Repair appears indicating home is missing
|
||||||
|
4. Remove integration entry to clear repair
|
||||||
|
|
||||||
|
## Adding New Repair Types
|
||||||
|
|
||||||
|
To add a new repair type:
|
||||||
|
|
||||||
|
1. **Add constants** (if needed) in `coordinator/constants.py`
|
||||||
|
2. **Add state tracking** in `TibberPricesRepairManager.__init__`
|
||||||
|
3. **Implement check method** with create/clear logic
|
||||||
|
4. **Add translations** to all 5 language files
|
||||||
|
5. **Integrate into coordinator** update cycle or error handlers
|
||||||
|
6. **Add cleanup** to `clear_all_repairs()` method
|
||||||
|
7. **Document** in this file
|
||||||
|
|
||||||
|
**Example template:**
|
||||||
|
```python
|
||||||
|
async def check_new_condition(self, *, param: bool) -> None:
|
||||||
|
"""Check new condition and create/clear repair."""
|
||||||
|
should_warn = param # Your condition logic
|
||||||
|
|
||||||
|
if should_warn and not self._new_repair_active:
|
||||||
|
await self._create_new_repair()
|
||||||
|
elif not should_warn and self._new_repair_active:
|
||||||
|
await self._clear_new_repair()
|
||||||
|
|
||||||
|
async def _create_new_repair(self) -> None:
|
||||||
|
"""Create new repair issue."""
|
||||||
|
_LOGGER.warning("New issue detected - creating repair")
|
||||||
|
|
||||||
|
ir.async_create_issue(
|
||||||
|
self._hass,
|
||||||
|
DOMAIN,
|
||||||
|
f"new_issue_{self._entry_id}",
|
||||||
|
is_fixable=False,
|
||||||
|
severity=ir.IssueSeverity.WARNING,
|
||||||
|
translation_key="new_issue",
|
||||||
|
translation_placeholders={
|
||||||
|
"home_name": self._home_name,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
self._new_repair_active = True
|
||||||
|
|
||||||
|
async def _clear_new_repair(self) -> None:
|
||||||
|
"""Clear new repair issue."""
|
||||||
|
_LOGGER.debug("New issue resolved - clearing repair")
|
||||||
|
|
||||||
|
ir.async_delete_issue(
|
||||||
|
self._hass,
|
||||||
|
DOMAIN,
|
||||||
|
f"new_issue_{self._entry_id}",
|
||||||
|
)
|
||||||
|
self._new_repair_active = False
|
||||||
|
```
|
||||||
|
|
||||||
|
## Best Practices
|
||||||
|
|
||||||
|
1. **Always use state tracking** - Prevents duplicate repair creation
|
||||||
|
2. **Auto-clear when resolved** - Improves user experience
|
||||||
|
3. **Clear on shutdown** - Prevents orphaned repairs
|
||||||
|
4. **Use descriptive issue IDs** - Include entry_id for multi-home setups
|
||||||
|
5. **Provide actionable guidance** - Tell users what they can do
|
||||||
|
6. **Use appropriate severity** - WARNING for most cases, ERROR only for critical
|
||||||
|
7. **Test all language translations** - Ensure placeholders work correctly
|
||||||
|
8. **Document expected behavior** - What triggers, what clears, what user should do
|
||||||
|
|
||||||
|
## Future Enhancements
|
||||||
|
|
||||||
|
Potential additions to the repairs system:
|
||||||
|
|
||||||
|
- **Stale data warning**: Alert when cache is >24 hours old with no API updates
|
||||||
|
- **Missing permissions**: Detect insufficient API token scopes
|
||||||
|
- **Config migration needed**: Notify users of breaking changes requiring reconfiguration
|
||||||
|
- **Extreme price alert**: Warn when prices exceed historical thresholds (optional, user-configurable)
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
- [Home Assistant Repairs Documentation](https://developers.home-assistant.io/docs/core/platform/repairs)
|
||||||
|
- Issue Registry API: `homeassistant.helpers.issue_registry`
|
||||||
|
- Integration Constants: `custom_components/tibber_prices/const.py`
|
||||||
|
- Repair Manager Implementation: `custom_components/tibber_prices/coordinator/repairs.py`
|
||||||
57
docs/developer/versioned_docs/version-v0.23.1/setup.md
Normal file
57
docs/developer/versioned_docs/version-v0.23.1/setup.md
Normal file
|
|
@ -0,0 +1,57 @@
|
||||||
|
# Development Setup
|
||||||
|
|
||||||
|
> **Note:** This guide is under construction. For now, please refer to [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.23.1/AGENTS.md) for detailed setup information.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- VS Code with Dev Container support
|
||||||
|
- Docker installed and running
|
||||||
|
- GitHub account (for Tibber API token)
|
||||||
|
|
||||||
|
## Quick Setup
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Clone the repository
|
||||||
|
git clone https://github.com/jpawlowski/hass.tibber_prices.git
|
||||||
|
cd hass.tibber_prices
|
||||||
|
|
||||||
|
# Open in VS Code
|
||||||
|
code .
|
||||||
|
|
||||||
|
# Reopen in DevContainer (VS Code will prompt)
|
||||||
|
# Or manually: Ctrl+Shift+P → "Dev Containers: Reopen in Container"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Development Environment
|
||||||
|
|
||||||
|
The DevContainer includes:
|
||||||
|
|
||||||
|
- Python 3.13 with `.venv` at `/home/vscode/.venv/`
|
||||||
|
- `uv` package manager (fast, modern Python tooling)
|
||||||
|
- Home Assistant development dependencies
|
||||||
|
- Ruff linter/formatter
|
||||||
|
- Git, GitHub CLI, Node.js, Rust toolchain
|
||||||
|
|
||||||
|
## Running the Integration
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Start Home Assistant in debug mode
|
||||||
|
./scripts/develop
|
||||||
|
```
|
||||||
|
|
||||||
|
Visit http://localhost:8123
|
||||||
|
|
||||||
|
## Making Changes
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Lint and format code
|
||||||
|
./scripts/lint
|
||||||
|
|
||||||
|
# Check-only (CI mode)
|
||||||
|
./scripts/lint-check
|
||||||
|
|
||||||
|
# Validate integration structure
|
||||||
|
./scripts/release/hassfest
|
||||||
|
```
|
||||||
|
|
||||||
|
See [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.23.1/AGENTS.md) for detailed patterns and conventions.
|
||||||
52
docs/developer/versioned_docs/version-v0.23.1/testing.md
Normal file
52
docs/developer/versioned_docs/version-v0.23.1/testing.md
Normal file
|
|
@ -0,0 +1,52 @@
|
||||||
|
# Testing
|
||||||
|
|
||||||
|
> **Note:** This guide is under construction.
|
||||||
|
|
||||||
|
## Integration Validation
|
||||||
|
|
||||||
|
Before running tests or committing changes, validate the integration structure:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run local validation (JSON syntax, Python syntax, required files)
|
||||||
|
./scripts/release/hassfest
|
||||||
|
```
|
||||||
|
|
||||||
|
This lightweight script checks:
|
||||||
|
|
||||||
|
- ✓ `config_flow.py` exists
|
||||||
|
- ✓ `manifest.json` is valid JSON with required fields
|
||||||
|
- ✓ Translation files have valid JSON syntax
|
||||||
|
- ✓ All Python files compile without syntax errors
|
||||||
|
|
||||||
|
**Note:** Full hassfest validation runs in GitHub Actions on push.
|
||||||
|
|
||||||
|
## Running Tests
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run all tests
|
||||||
|
pytest tests/
|
||||||
|
|
||||||
|
# Run specific test file
|
||||||
|
pytest tests/test_coordinator.py
|
||||||
|
|
||||||
|
# Run with coverage
|
||||||
|
pytest --cov=custom_components.tibber_prices tests/
|
||||||
|
```
|
||||||
|
|
||||||
|
## Manual Testing
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Start development environment
|
||||||
|
./scripts/develop
|
||||||
|
```
|
||||||
|
|
||||||
|
Then test in Home Assistant UI:
|
||||||
|
|
||||||
|
- Configuration flow
|
||||||
|
- Sensor states and attributes
|
||||||
|
- Services
|
||||||
|
- Translation strings
|
||||||
|
|
||||||
|
## Test Guidelines
|
||||||
|
|
||||||
|
Coming soon...
|
||||||
|
|
@ -0,0 +1,433 @@
|
||||||
|
---
|
||||||
|
comments: false
|
||||||
|
---
|
||||||
|
|
||||||
|
# Timer Architecture
|
||||||
|
|
||||||
|
This document explains the timer/scheduler system in the Tibber Prices integration - what runs when, why, and how they coordinate.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The integration uses **three independent timer mechanisms** for different purposes:
|
||||||
|
|
||||||
|
| Timer | Type | Interval | Purpose | Trigger Method |
|
||||||
|
|-------|------|----------|---------|----------------|
|
||||||
|
| **Timer #1** | HA built-in | 15 minutes | API data updates | `DataUpdateCoordinator` |
|
||||||
|
| **Timer #2** | Custom | :00, :15, :30, :45 | Entity state refresh | `async_track_utc_time_change()` |
|
||||||
|
| **Timer #3** | Custom | Every minute | Countdown/progress | `async_track_utc_time_change()` |
|
||||||
|
|
||||||
|
**Key principle:** Timer #1 (HA) controls **data fetching**, Timer #2 controls **entity updates**, Timer #3 controls **timing displays**.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Timer #1: DataUpdateCoordinator (HA Built-in)
|
||||||
|
|
||||||
|
**File:** `coordinator/core.py` → `TibberPricesDataUpdateCoordinator`
|
||||||
|
|
||||||
|
**Type:** Home Assistant's built-in `DataUpdateCoordinator` with `UPDATE_INTERVAL = 15 minutes`
|
||||||
|
|
||||||
|
**What it is:**
|
||||||
|
- HA provides this timer system automatically when you inherit from `DataUpdateCoordinator`
|
||||||
|
- Triggers `_async_update_data()` method every 15 minutes
|
||||||
|
- **Not** synchronized to clock boundaries (each installation has different start time)
|
||||||
|
|
||||||
|
**Purpose:** Check if fresh API data is needed, fetch if necessary
|
||||||
|
|
||||||
|
**What it does:**
|
||||||
|
|
||||||
|
```python
|
||||||
|
async def _async_update_data(self) -> TibberPricesData:
|
||||||
|
# Step 1: Check midnight turnover FIRST (prevents race with Timer #2)
|
||||||
|
if self._check_midnight_turnover_needed(dt_util.now()):
|
||||||
|
await self._perform_midnight_data_rotation(dt_util.now())
|
||||||
|
# Notify ALL entities after midnight turnover
|
||||||
|
return self.data # Early return
|
||||||
|
|
||||||
|
# Step 2: Check if we need tomorrow data (after 13:00)
|
||||||
|
if self._should_update_price_data() == "tomorrow_check":
|
||||||
|
await self._fetch_and_update_data() # Fetch from API
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
# Step 3: Use cached data (fast path - most common)
|
||||||
|
return self.data
|
||||||
|
```
|
||||||
|
|
||||||
|
**Load Distribution:**
|
||||||
|
- Each HA installation starts Timer #1 at different times → natural distribution
|
||||||
|
- Tomorrow data check adds 0-30s random delay → prevents "thundering herd" on Tibber API
|
||||||
|
- Result: API load spread over ~30 minutes instead of all at once
|
||||||
|
|
||||||
|
**Midnight Coordination:**
|
||||||
|
- Atomic check: `_check_midnight_turnover_needed(now)` compares dates only (no side effects)
|
||||||
|
- If midnight turnover needed → performs it and returns early
|
||||||
|
- Timer #2 will see turnover already done and skip gracefully
|
||||||
|
|
||||||
|
**Why we use HA's timer:**
|
||||||
|
- Automatic restart after HA restart
|
||||||
|
- Built-in retry logic for temporary failures
|
||||||
|
- Standard HA integration pattern
|
||||||
|
- Handles backpressure (won't queue up if previous update still running)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Timer #2: Quarter-Hour Refresh (Custom)
|
||||||
|
|
||||||
|
**File:** `coordinator/listeners.py` → `ListenerManager.schedule_quarter_hour_refresh()`
|
||||||
|
|
||||||
|
**Type:** Custom timer using `async_track_utc_time_change(minute=[0, 15, 30, 45], second=0)`
|
||||||
|
|
||||||
|
**Purpose:** Update time-sensitive entity states at interval boundaries **without waiting for API poll**
|
||||||
|
|
||||||
|
**Problem it solves:**
|
||||||
|
- Timer #1 runs every 15 minutes but NOT synchronized to clock (:03, :18, :33, :48)
|
||||||
|
- Current price changes at :00, :15, :30, :45 → entities would show stale data for up to 15 minutes
|
||||||
|
- Example: 14:00 new price, but Timer #1 ran at 13:58 → next update at 14:13 → users see old price until 14:13
|
||||||
|
|
||||||
|
**What it does:**
|
||||||
|
|
||||||
|
```python
|
||||||
|
async def _handle_quarter_hour_refresh(self, now: datetime) -> None:
|
||||||
|
# Step 1: Check midnight turnover (coordinates with Timer #1)
|
||||||
|
if self._check_midnight_turnover_needed(now):
|
||||||
|
# Timer #1 might have already done this → atomic check handles it
|
||||||
|
await self._perform_midnight_data_rotation(now)
|
||||||
|
# Notify ALL entities after midnight turnover
|
||||||
|
return
|
||||||
|
|
||||||
|
# Step 2: Normal quarter-hour refresh (most common path)
|
||||||
|
# Only notify time-sensitive entities (current_interval_price, etc.)
|
||||||
|
self._listener_manager.async_update_time_sensitive_listeners()
|
||||||
|
```
|
||||||
|
|
||||||
|
**Smart Boundary Tolerance:**
|
||||||
|
- Uses `round_to_nearest_quarter_hour()` with ±2 second tolerance
|
||||||
|
- HA may schedule timer at 14:59:58 → rounds to 15:00:00 (shows new interval)
|
||||||
|
- HA restart at 14:59:30 → stays at 14:45:00 (shows current interval)
|
||||||
|
- See [Architecture](./architecture.md#3-quarter-hour-precision) for details
|
||||||
|
|
||||||
|
**Absolute Time Scheduling:**
|
||||||
|
- `async_track_utc_time_change()` plans for **all future boundaries** (15:00, 15:15, 15:30, ...)
|
||||||
|
- NOT relative delays ("in 15 minutes")
|
||||||
|
- If triggered at 14:59:58 → next trigger is 15:15:00, NOT 15:00:00 (prevents double updates)
|
||||||
|
|
||||||
|
**Which entities listen:**
|
||||||
|
- All sensors that depend on "current interval" (e.g., `current_interval_price`, `next_interval_price`)
|
||||||
|
- Binary sensors that check "is now in period?" (e.g., `best_price_period_active`)
|
||||||
|
- ~50-60 entities out of 120+ total
|
||||||
|
|
||||||
|
**Why custom timer:**
|
||||||
|
- HA's built-in coordinator doesn't support exact boundary timing
|
||||||
|
- We need **absolute time** triggers, not periodic intervals
|
||||||
|
- Allows fast entity updates without expensive data transformation
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Timer #3: Minute Refresh (Custom)
|
||||||
|
|
||||||
|
**File:** `coordinator/listeners.py` → `ListenerManager.schedule_minute_refresh()`
|
||||||
|
|
||||||
|
**Type:** Custom timer using `async_track_utc_time_change(second=0)` (every minute)
|
||||||
|
|
||||||
|
**Purpose:** Update countdown and progress sensors for smooth UX
|
||||||
|
|
||||||
|
**What it does:**
|
||||||
|
|
||||||
|
```python
|
||||||
|
async def _handle_minute_refresh(self, now: datetime) -> None:
|
||||||
|
# Only notify minute-update entities
|
||||||
|
# No data fetching, no transformation, no midnight handling
|
||||||
|
self._listener_manager.async_update_minute_listeners()
|
||||||
|
```
|
||||||
|
|
||||||
|
**Which entities listen:**
|
||||||
|
- `best_price_remaining_minutes` - Countdown timer
|
||||||
|
- `peak_price_remaining_minutes` - Countdown timer
|
||||||
|
- `best_price_progress` - Progress bar (0-100%)
|
||||||
|
- `peak_price_progress` - Progress bar (0-100%)
|
||||||
|
- ~10 entities total
|
||||||
|
|
||||||
|
**Why custom timer:**
|
||||||
|
- Users want smooth countdowns (not jumping 15 minutes at a time)
|
||||||
|
- Progress bars need minute-by-minute updates
|
||||||
|
- Very lightweight (no data processing, just state recalculation)
|
||||||
|
|
||||||
|
**Why NOT every second:**
|
||||||
|
- Minute precision sufficient for countdown UX
|
||||||
|
- Reduces CPU load (60× fewer updates than seconds)
|
||||||
|
- Home Assistant best practice (avoid sub-minute updates)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Listener Pattern (Python/HA Terminology)
|
||||||
|
|
||||||
|
**Your question:** "Sind Timer für dich eigentlich 'Listener'?" (English: "Are timers actually 'listeners' to you?")
|
||||||
|
|
||||||
|
**Answer:** In Home Assistant terminology:
|
||||||
|
|
||||||
|
- **Timer** = The mechanism that triggers at specific times (`async_track_utc_time_change`)
|
||||||
|
- **Listener** = A callback function that gets called when timer triggers
|
||||||
|
- **Observer Pattern** = Entities register callbacks, coordinator notifies them
|
||||||
|
|
||||||
|
**How it works:**
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Entity registers a listener callback
|
||||||
|
class TibberPricesSensor(CoordinatorEntity):
|
||||||
|
async def async_added_to_hass(self):
|
||||||
|
# Register this entity's update callback
|
||||||
|
self._remove_listener = self.coordinator.async_add_time_sensitive_listener(
|
||||||
|
self._handle_coordinator_update
|
||||||
|
)
|
||||||
|
|
||||||
|
# Coordinator maintains list of listeners
|
||||||
|
class ListenerManager:
|
||||||
|
def __init__(self):
|
||||||
|
self._time_sensitive_listeners = [] # List of callbacks
|
||||||
|
|
||||||
|
def async_add_time_sensitive_listener(self, callback):
|
||||||
|
self._time_sensitive_listeners.append(callback)
|
||||||
|
|
||||||
|
def async_update_time_sensitive_listeners(self):
|
||||||
|
# Timer triggered → notify all listeners
|
||||||
|
for callback in self._time_sensitive_listeners:
|
||||||
|
callback() # Entity updates itself
|
||||||
|
```
|
||||||
|
|
||||||
|
**Why this pattern:**
|
||||||
|
- Decouples timer logic from entity logic
|
||||||
|
- One timer can notify many entities efficiently
|
||||||
|
- Entities can unregister when removed (cleanup)
|
||||||
|
- Standard HA pattern for coordinator-based integrations
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Timer Coordination Scenarios
|
||||||
|
|
||||||
|
### Scenario 1: Normal Operation (No Midnight)
|
||||||
|
|
||||||
|
```
|
||||||
|
14:00:00 → Timer #2 triggers
|
||||||
|
→ Update time-sensitive entities (current price changed)
|
||||||
|
→ 60 entities updated (~5ms)
|
||||||
|
|
||||||
|
14:03:12 → Timer #1 triggers (HA's 15-min cycle)
|
||||||
|
→ Check if tomorrow data needed (no, still cached)
|
||||||
|
→ Return cached data (fast path, ~2ms)
|
||||||
|
|
||||||
|
14:15:00 → Timer #2 triggers
|
||||||
|
→ Update time-sensitive entities
|
||||||
|
→ 60 entities updated (~5ms)
|
||||||
|
|
||||||
|
14:16:00 → Timer #3 triggers
|
||||||
|
→ Update countdown/progress entities
|
||||||
|
→ 10 entities updated (~1ms)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Key observation:** Timer #1 and Timer #2 run **independently**, no conflicts.
|
||||||
|
|
||||||
|
### Scenario 2: Midnight Turnover
|
||||||
|
|
||||||
|
```
|
||||||
|
23:45:12 → Timer #1 triggers
|
||||||
|
→ Check midnight: current_date=2025-11-17, last_check=2025-11-17
|
||||||
|
→ No turnover needed
|
||||||
|
→ Return cached data
|
||||||
|
|
||||||
|
00:00:00 → Timer #2 triggers FIRST (synchronized to midnight)
|
||||||
|
→ Check midnight: current_date=2025-11-18, last_check=2025-11-17
|
||||||
|
→ Turnover needed! Perform rotation, save cache
|
||||||
|
→ _last_midnight_check = 2025-11-18
|
||||||
|
→ Notify ALL entities
|
||||||
|
|
||||||
|
00:03:12 → Timer #1 triggers (its regular cycle)
|
||||||
|
→ Check midnight: current_date=2025-11-18, last_check=2025-11-18
|
||||||
|
→ Turnover already done → skip
|
||||||
|
→ Return existing data (fast path)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Key observation:** Atomic date comparison prevents double-turnover, whoever runs first wins.
|
||||||
|
|
||||||
|
### Scenario 3: Tomorrow Data Check (After 13:00)
|
||||||
|
|
||||||
|
```
|
||||||
|
13:00:00 → Timer #2 triggers
|
||||||
|
→ Normal quarter-hour refresh
|
||||||
|
→ Update time-sensitive entities
|
||||||
|
|
||||||
|
13:03:12 → Timer #1 triggers
|
||||||
|
→ Check tomorrow data: missing or invalid
|
||||||
|
→ Fetch from Tibber API (~300ms)
|
||||||
|
→ Transform data (~200ms)
|
||||||
|
→ Calculate periods (~100ms)
|
||||||
|
→ Notify ALL entities (new data available)
|
||||||
|
|
||||||
|
13:15:00 → Timer #2 triggers
|
||||||
|
→ Normal quarter-hour refresh (uses newly fetched data)
|
||||||
|
→ Update time-sensitive entities
|
||||||
|
```
|
||||||
|
|
||||||
|
**Key observation:** Timer #1 does expensive work (API + transform), Timer #2 does cheap work (entity notify).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Why We Keep HA's Timer (Timer #1)
|
||||||
|
|
||||||
|
**Your question:** "warum wir den HA timer trotzdem weiter benutzen, da er ja für uns unkontrollierte aktualisierte änderungen triggert" (English: "why we still keep using the HA timer, even though it triggers updates at times we don't control")
|
||||||
|
|
||||||
|
**Answer:** You're correct that it's not synchronized, but that's actually **intentional**:
|
||||||
|
|
||||||
|
### Reason 1: Load Distribution on Tibber API
|
||||||
|
|
||||||
|
If all installations used synchronized timers:
|
||||||
|
- ❌ Everyone fetches at 13:00:00 → Tibber API overload
|
||||||
|
- ❌ Everyone fetches at 14:00:00 → Tibber API overload
|
||||||
|
- ❌ "Thundering herd" problem
|
||||||
|
|
||||||
|
With HA's unsynchronized timer:
|
||||||
|
- ✅ Installation A: 13:03:12, 13:18:12, 13:33:12, ...
|
||||||
|
- ✅ Installation B: 13:07:45, 13:22:45, 13:37:45, ...
|
||||||
|
- ✅ Installation C: 13:11:28, 13:26:28, 13:41:28, ...
|
||||||
|
- ✅ Natural distribution over ~30 minutes
|
||||||
|
- ✅ Plus: Random 0-30s delay on tomorrow checks
|
||||||
|
|
||||||
|
**Result:** API load spread evenly, no spikes.
|
||||||
|
|
||||||
|
### Reason 2: What Timer #1 Actually Checks
|
||||||
|
|
||||||
|
Timer #1 does NOT blindly update. It checks:
|
||||||
|
|
||||||
|
```python
|
||||||
|
def _should_update_price_data(self) -> str:
|
||||||
|
# Check 1: Do we have tomorrow data? (only relevant after ~13:00)
|
||||||
|
if tomorrow_missing or tomorrow_invalid:
|
||||||
|
return "tomorrow_check" # Fetch needed
|
||||||
|
|
||||||
|
# Check 2: Is cache still valid?
|
||||||
|
if cache_valid:
|
||||||
|
return "cached" # No fetch needed (most common!)
|
||||||
|
|
||||||
|
# Check 3: Has enough time passed?
|
||||||
|
if time_since_last_update < threshold:
|
||||||
|
return "cached" # Too soon, skip fetch
|
||||||
|
|
||||||
|
return "update_needed" # Rare case
|
||||||
|
```
|
||||||
|
|
||||||
|
**Most Timer #1 cycles:** Fast path (~2ms), no API call, just returns cached data.
|
||||||
|
|
||||||
|
**API fetch only when:**
|
||||||
|
- Tomorrow data missing/invalid (after 13:00)
|
||||||
|
- Cache expired (midnight turnover)
|
||||||
|
- Explicit user refresh
|
||||||
|
|
||||||
|
### Reason 3: HA Integration Best Practices
|
||||||
|
|
||||||
|
- ✅ Standard HA pattern: `DataUpdateCoordinator` is recommended by HA docs
|
||||||
|
- ✅ Automatic retry logic for temporary API failures
|
||||||
|
- ✅ Backpressure handling (won't queue updates if previous still running)
|
||||||
|
- ✅ Developer tools integration (users can manually trigger refresh)
|
||||||
|
- ✅ Diagnostics integration (shows last update time, success/failure)
|
||||||
|
|
||||||
|
### What We DO Synchronize
|
||||||
|
|
||||||
|
- ✅ **Timer #2:** Entity state updates at exact boundaries (user-visible)
|
||||||
|
- ✅ **Timer #3:** Countdown/progress at exact minutes (user-visible)
|
||||||
|
- ❌ **Timer #1:** API fetch timing (invisible to user, distribution wanted)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Performance Characteristics
|
||||||
|
|
||||||
|
### Timer #1 (DataUpdateCoordinator)
|
||||||
|
- **Triggers:** Every 15 minutes (unsynchronized)
|
||||||
|
- **Fast path:** ~2ms (cache check, return existing data)
|
||||||
|
- **Slow path:** ~600ms (API fetch + transform + calculate)
|
||||||
|
- **Frequency:** ~96 times/day
|
||||||
|
- **API calls:** ~1-2 times/day (cached otherwise)
|
||||||
|
|
||||||
|
### Timer #2 (Quarter-Hour Refresh)
|
||||||
|
- **Triggers:** 96 times/day (exact boundaries)
|
||||||
|
- **Processing:** ~5ms (notify 60 entities)
|
||||||
|
- **No API calls:** Uses cached/transformed data
|
||||||
|
- **No transformation:** Just entity state updates
|
||||||
|
|
||||||
|
### Timer #3 (Minute Refresh)
|
||||||
|
- **Triggers:** 1440 times/day (every minute)
|
||||||
|
- **Processing:** ~1ms (notify 10 entities)
|
||||||
|
- **No API calls:** No data processing at all
|
||||||
|
- **Lightweight:** Just countdown math
|
||||||
|
|
||||||
|
**Total CPU budget:** ~15 seconds/day for all timers combined.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Debugging Timer Issues
|
||||||
|
|
||||||
|
### Check Timer #1 (HA Coordinator)
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Enable debug logging
|
||||||
|
_LOGGER.setLevel(logging.DEBUG)
|
||||||
|
|
||||||
|
# Watch for these log messages:
|
||||||
|
"Fetching data from API (reason: tomorrow_check)" # API call
|
||||||
|
"Using cached data (no update needed)" # Fast path
|
||||||
|
"Midnight turnover detected (Timer #1)" # Turnover
|
||||||
|
```
|
||||||
|
|
||||||
|
### Check Timer #2 (Quarter-Hour)
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Watch coordinator logs:
|
||||||
|
"Updated 60 time-sensitive entities at quarter-hour boundary" # Normal
|
||||||
|
"Midnight turnover detected (Timer #2)" # Turnover
|
||||||
|
```
|
||||||
|
|
||||||
|
### Check Timer #3 (Minute)
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Watch coordinator logs:
|
||||||
|
"Updated 10 minute-update entities" # Every minute
|
||||||
|
```
|
||||||
|
|
||||||
|
### Common Issues
|
||||||
|
|
||||||
|
1. **Timer #2 not triggering:**
|
||||||
|
- Check: `schedule_quarter_hour_refresh()` called in `__init__`?
|
||||||
|
- Check: `_quarter_hour_timer_cancel` properly stored?
|
||||||
|
|
||||||
|
2. **Double updates at midnight:**
|
||||||
|
- Should NOT happen (atomic coordination)
|
||||||
|
- Check: Both timers use same date comparison logic?
|
||||||
|
|
||||||
|
3. **API overload:**
|
||||||
|
- Check: Random delay working? (0-30s jitter on tomorrow check)
|
||||||
|
- Check: Cache validation logic correct?
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Related Documentation
|
||||||
|
|
||||||
|
- **[Architecture](./architecture.md)** - Overall system design, data flow
|
||||||
|
- **[Caching Strategy](./caching-strategy.md)** - Cache lifetimes, invalidation, midnight turnover
|
||||||
|
- **[AGENTS.md](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.23.1/AGENTS.md)** - Complete reference for AI development
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
**Three independent timers:**
|
||||||
|
1. **Timer #1** (HA built-in, 15 min, unsynchronized) → Data fetching (when needed)
|
||||||
|
2. **Timer #2** (Custom, :00/:15/:30/:45) → Entity state updates (always)
|
||||||
|
3. **Timer #3** (Custom, every minute) → Countdown/progress (always)
|
||||||
|
|
||||||
|
**Key insights:**
|
||||||
|
- Timer #1 unsynchronized = good (load distribution on API)
|
||||||
|
- Timer #2 synchronized = good (user sees correct data immediately)
|
||||||
|
- Timer #3 synchronized = good (smooth countdown UX)
|
||||||
|
- All three coordinate gracefully (atomic midnight checks, no conflicts)
|
||||||
|
|
||||||
|
**"Listener" terminology:**
|
||||||
|
- Timer = mechanism that triggers
|
||||||
|
- Listener = callback that gets called
|
||||||
|
- Observer pattern = entities register, coordinator notifies
|
||||||
186
docs/developer/versioned_docs/version-v0.24.0/api-reference.md
Normal file
186
docs/developer/versioned_docs/version-v0.24.0/api-reference.md
Normal file
|
|
@ -0,0 +1,186 @@
|
||||||
|
---
|
||||||
|
comments: false
|
||||||
|
---
|
||||||
|
|
||||||
|
# API Reference
|
||||||
|
|
||||||
|
Documentation of the Tibber GraphQL API used by this integration.
|
||||||
|
|
||||||
|
## GraphQL Endpoint
|
||||||
|
|
||||||
|
```
|
||||||
|
https://api.tibber.com/v1-beta/gql
|
||||||
|
```
|
||||||
|
|
||||||
|
**Authentication:** Bearer token in `Authorization` header
|
||||||
|
|
||||||
|
## Queries Used
|
||||||
|
|
||||||
|
### User Data Query
|
||||||
|
|
||||||
|
Fetches home information and metadata:
|
||||||
|
|
||||||
|
```graphql
|
||||||
|
query {
|
||||||
|
viewer {
|
||||||
|
homes {
|
||||||
|
id
|
||||||
|
appNickname
|
||||||
|
address {
|
||||||
|
address1
|
||||||
|
postalCode
|
||||||
|
city
|
||||||
|
country
|
||||||
|
}
|
||||||
|
timeZone
|
||||||
|
currentSubscription {
|
||||||
|
priceInfo {
|
||||||
|
current {
|
||||||
|
currency
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
meteringPointData {
|
||||||
|
consumptionEan
|
||||||
|
gridAreaCode
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Cached for:** 24 hours
|
||||||
|
|
||||||
|
### Price Data Query
|
||||||
|
|
||||||
|
Fetches quarter-hourly prices:
|
||||||
|
|
||||||
|
```graphql
|
||||||
|
query($homeId: ID!) {
|
||||||
|
viewer {
|
||||||
|
home(id: $homeId) {
|
||||||
|
currentSubscription {
|
||||||
|
priceInfo {
|
||||||
|
range(resolution: QUARTER_HOURLY, first: 384) {
|
||||||
|
nodes {
|
||||||
|
total
|
||||||
|
startsAt
|
||||||
|
level
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
- `homeId`: Tibber home identifier
|
||||||
|
- `resolution`: Always `QUARTER_HOURLY`
|
||||||
|
- `first`: 384 intervals (4 days of data)
|
||||||
|
|
||||||
|
**Cached until:** Midnight local time
|
||||||
|
|
||||||
|
## Rate Limits
|
||||||
|
|
||||||
|
Tibber API rate limits (as of 2024):
|
||||||
|
- **5000 requests per hour** per token
|
||||||
|
- **Burst limit:** 100 requests per minute
|
||||||
|
|
||||||
|
Integration stays well below these limits:
|
||||||
|
- Polls every 15 minutes = 96 requests/day
|
||||||
|
- User data cached for 24h = 1 request/day
|
||||||
|
- **Total:** ~100 requests/day per home
|
||||||
|
|
||||||
|
## Response Format
|
||||||
|
|
||||||
|
### Price Node Structure
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"total": 0.2456,
|
||||||
|
"startsAt": "2024-12-06T14:00:00.000+01:00",
|
||||||
|
"level": "NORMAL"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Fields:**
|
||||||
|
- `total`: Price including VAT and fees (currency's major unit, e.g., EUR)
|
||||||
|
- `startsAt`: ISO 8601 timestamp with timezone
|
||||||
|
- `level`: Tibber's own classification (VERY_CHEAP, CHEAP, NORMAL, EXPENSIVE, VERY_EXPENSIVE)
|
||||||
|
|
||||||
|
### Currency Information
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"currency": "EUR"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Supported currencies:
|
||||||
|
- `EUR` (Euro) - displayed as ct/kWh
|
||||||
|
- `NOK` (Norwegian Krone) - displayed as øre/kWh
|
||||||
|
- `SEK` (Swedish Krona) - displayed as öre/kWh
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
### Common Error Responses
|
||||||
|
|
||||||
|
**Invalid Token:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"errors": [{
|
||||||
|
"message": "Unauthorized",
|
||||||
|
"extensions": {
|
||||||
|
"code": "UNAUTHENTICATED"
|
||||||
|
}
|
||||||
|
}]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Rate Limit Exceeded:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"errors": [{
|
||||||
|
"message": "Too Many Requests",
|
||||||
|
"extensions": {
|
||||||
|
"code": "RATE_LIMIT_EXCEEDED"
|
||||||
|
}
|
||||||
|
}]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Home Not Found:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"errors": [{
|
||||||
|
"message": "Home not found",
|
||||||
|
"extensions": {
|
||||||
|
"code": "NOT_FOUND"
|
||||||
|
}
|
||||||
|
}]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Integration handles these with:
|
||||||
|
- Exponential backoff retry (3 attempts)
|
||||||
|
- ConfigEntryAuthFailed for auth errors
|
||||||
|
- ConfigEntryNotReady for temporary failures
|
||||||
|
|
||||||
|
## Data Transformation
|
||||||
|
|
||||||
|
Raw API data is enriched with:
|
||||||
|
- **Trailing 24h average** - Calculated from previous intervals
|
||||||
|
- **Leading 24h average** - Calculated from future intervals
|
||||||
|
- **Price difference %** - Deviation from average
|
||||||
|
- **Custom rating** - Based on user thresholds (different from Tibber's `level`)
|
||||||
|
|
||||||
|
See `utils/price.py` for enrichment logic.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
💡 **External Resources:**
|
||||||
|
- [Tibber API Documentation](https://developer.tibber.com/docs/overview)
|
||||||
|
- [GraphQL Explorer](https://developer.tibber.com/explorer)
|
||||||
|
- [Get API Token](https://developer.tibber.com/settings/access-token)
|
||||||
358
docs/developer/versioned_docs/version-v0.24.0/architecture.md
Normal file
358
docs/developer/versioned_docs/version-v0.24.0/architecture.md
Normal file
|
|
@ -0,0 +1,358 @@
|
||||||
|
---
|
||||||
|
comments: false
|
||||||
|
---
|
||||||
|
|
||||||
|
# Architecture
|
||||||
|
|
||||||
|
This document provides a visual overview of the integration's architecture, focusing on end-to-end data flow and caching layers.
|
||||||
|
|
||||||
|
For detailed implementation patterns, see [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.24.0/AGENTS.md).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## End-to-End Data Flow
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart TB
|
||||||
|
%% External Systems
|
||||||
|
TIBBER[("🌐 Tibber GraphQL API<br/>api.tibber.com")]
|
||||||
|
HA[("🏠 Home Assistant<br/>Core")]
|
||||||
|
|
||||||
|
%% Entry Point
|
||||||
|
SETUP["__init__.py<br/>async_setup_entry()"]
|
||||||
|
|
||||||
|
%% Core Components
|
||||||
|
API["api.py<br/>TibberPricesApiClient<br/><br/>GraphQL queries"]
|
||||||
|
COORD["coordinator.py<br/>TibberPricesDataUpdateCoordinator<br/><br/>Orchestrates updates every 15min"]
|
||||||
|
|
||||||
|
%% Caching Layers
|
||||||
|
CACHE_API["💾 API Cache<br/>coordinator/cache.py<br/><br/>HA Storage (persistent)<br/>User: 24h | Prices: until midnight"]
|
||||||
|
CACHE_TRANS["💾 Transformation Cache<br/>coordinator/data_transformation.py<br/><br/>Memory (enriched prices)<br/>Until config change or midnight"]
|
||||||
|
CACHE_PERIOD["💾 Period Cache<br/>coordinator/periods.py<br/><br/>Memory (calculated periods)<br/>Hash-based invalidation"]
|
||||||
|
CACHE_CONFIG["💾 Config Cache<br/>coordinator/*<br/><br/>Memory (parsed options)<br/>Until config change"]
|
||||||
|
CACHE_TRANS_TEXT["💾 Translation Cache<br/>const.py<br/><br/>Memory (UI strings)<br/>Until HA restart"]
|
||||||
|
|
||||||
|
%% Processing Components
|
||||||
|
TRANSFORM["coordinator/data_transformation.py<br/>DataTransformer<br/><br/>Enrich prices with statistics"]
|
||||||
|
PERIODS["coordinator/periods.py<br/>PeriodCalculator<br/><br/>Calculate best/peak periods"]
|
||||||
|
ENRICH["price_utils.py + average_utils.py<br/><br/>Calculate trailing/leading averages<br/>rating_level, differences"]
|
||||||
|
|
||||||
|
%% Output Components
|
||||||
|
SENSORS["sensor/<br/>TibberPricesSensor<br/><br/>120+ price/level/rating sensors"]
|
||||||
|
BINARY["binary_sensor/<br/>TibberPricesBinarySensor<br/><br/>Period indicators"]
|
||||||
|
SERVICES["services/<br/><br/>Custom service endpoints<br/>(get_chartdata, ApexCharts)"]
|
||||||
|
|
||||||
|
%% Flow Connections
|
||||||
|
TIBBER -->|"Query user data<br/>Query prices<br/>(yesterday/today/tomorrow)"| API
|
||||||
|
|
||||||
|
API -->|"Raw GraphQL response"| COORD
|
||||||
|
|
||||||
|
COORD -->|"Check cache first"| CACHE_API
|
||||||
|
CACHE_API -.->|"Cache hit:<br/>Return cached"| COORD
|
||||||
|
CACHE_API -.->|"Cache miss:<br/>Fetch from API"| API
|
||||||
|
|
||||||
|
COORD -->|"Raw price data"| TRANSFORM
|
||||||
|
TRANSFORM -->|"Check cache"| CACHE_TRANS
|
||||||
|
CACHE_TRANS -.->|"Cache hit"| TRANSFORM
|
||||||
|
CACHE_TRANS -.->|"Cache miss"| ENRICH
|
||||||
|
ENRICH -->|"Enriched data"| TRANSFORM
|
||||||
|
|
||||||
|
TRANSFORM -->|"Enriched price data"| COORD
|
||||||
|
|
||||||
|
COORD -->|"Enriched data"| PERIODS
|
||||||
|
PERIODS -->|"Check cache"| CACHE_PERIOD
|
||||||
|
CACHE_PERIOD -.->|"Hash match:<br/>Return cached"| PERIODS
|
||||||
|
CACHE_PERIOD -.->|"Hash mismatch:<br/>Recalculate"| PERIODS
|
||||||
|
|
||||||
|
PERIODS -->|"Calculated periods"| COORD
|
||||||
|
|
||||||
|
COORD -->|"Complete data<br/>(prices + periods)"| SENSORS
|
||||||
|
COORD -->|"Complete data"| BINARY
|
||||||
|
COORD -->|"Data access"| SERVICES
|
||||||
|
|
||||||
|
SENSORS -->|"Entity states"| HA
|
||||||
|
BINARY -->|"Entity states"| HA
|
||||||
|
SERVICES -->|"Service responses"| HA
|
||||||
|
|
||||||
|
%% Config access
|
||||||
|
CACHE_CONFIG -.->|"Parsed options"| TRANSFORM
|
||||||
|
CACHE_CONFIG -.->|"Parsed options"| PERIODS
|
||||||
|
CACHE_TRANS_TEXT -.->|"UI strings"| SENSORS
|
||||||
|
CACHE_TRANS_TEXT -.->|"UI strings"| BINARY
|
||||||
|
|
||||||
|
SETUP -->|"Initialize"| COORD
|
||||||
|
SETUP -->|"Register"| SENSORS
|
||||||
|
SETUP -->|"Register"| BINARY
|
||||||
|
SETUP -->|"Register"| SERVICES
|
||||||
|
|
||||||
|
%% Styling
|
||||||
|
classDef external fill:#e1f5ff,stroke:#0288d1,stroke-width:3px
|
||||||
|
classDef cache fill:#fff3e0,stroke:#f57c00,stroke-width:2px
|
||||||
|
classDef processing fill:#f3e5f5,stroke:#7b1fa2,stroke-width:2px
|
||||||
|
classDef output fill:#e8f5e9,stroke:#388e3c,stroke-width:2px
|
||||||
|
|
||||||
|
class TIBBER,HA external
|
||||||
|
class CACHE_API,CACHE_TRANS,CACHE_PERIOD,CACHE_CONFIG,CACHE_TRANS_TEXT cache
|
||||||
|
class TRANSFORM,PERIODS,ENRICH processing
|
||||||
|
class SENSORS,BINARY,SERVICES output
|
||||||
|
```
|
||||||
|
|
||||||
|
### Flow Description
|
||||||
|
|
||||||
|
1. **Setup** (`__init__.py`)
|
||||||
|
- Integration loads, creates coordinator instance
|
||||||
|
- Registers entity platforms (sensor, binary_sensor)
|
||||||
|
- Sets up custom services
|
||||||
|
|
||||||
|
2. **Data Fetch** (every 15 minutes)
|
||||||
|
- Coordinator triggers update via `api.py`
|
||||||
|
- API client checks **persistent cache** first (`coordinator/cache.py`)
|
||||||
|
- If cache valid → return cached data
|
||||||
|
- If cache stale → query Tibber GraphQL API
|
||||||
|
- Store fresh data in persistent cache (survives HA restart)
|
||||||
|
|
||||||
|
3. **Price Enrichment**
|
||||||
|
- Coordinator passes raw prices to `DataTransformer`
|
||||||
|
- Transformer checks **transformation cache** (memory)
|
||||||
|
- If cache valid → return enriched data
|
||||||
|
- If cache invalid → enrich via `price_utils.py` + `average_utils.py`
|
||||||
|
- Calculate 24h trailing/leading averages
|
||||||
|
- Calculate price differences (% from average)
|
||||||
|
- Assign rating levels (LOW/NORMAL/HIGH)
|
||||||
|
- Store enriched data in transformation cache
|
||||||
|
|
||||||
|
4. **Period Calculation**
|
||||||
|
- Coordinator passes enriched data to `PeriodCalculator`
|
||||||
|
- Calculator computes **hash** from prices + config
|
||||||
|
- If hash matches cache → return cached periods
|
||||||
|
- If hash differs → recalculate best/peak price periods
|
||||||
|
- Store periods with new hash
|
||||||
|
|
||||||
|
5. **Entity Updates**
|
||||||
|
- Coordinator provides complete data (prices + periods)
|
||||||
|
- Sensors read values via unified handlers
|
||||||
|
- Binary sensors evaluate period states
|
||||||
|
- Entities update on quarter-hour boundaries (00/15/30/45)
|
||||||
|
|
||||||
|
6. **Service Calls**
|
||||||
|
- Custom services access coordinator data directly
|
||||||
|
- Return formatted responses (JSON, ApexCharts format)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Caching Architecture
|
||||||
|
|
||||||
|
### Overview
|
||||||
|
|
||||||
|
The integration uses **5 independent caching layers** for optimal performance:
|
||||||
|
|
||||||
|
| Layer | Location | Lifetime | Invalidation | Memory |
|
||||||
|
|-------|----------|----------|--------------|--------|
|
||||||
|
| **API Cache** | `coordinator/cache.py` | 24h (user)<br/>Until midnight (prices) | Automatic | 50KB |
|
||||||
|
| **Translation Cache** | `const.py` | Until HA restart | Never | 5KB |
|
||||||
|
| **Config Cache** | `coordinator/*` | Until config change | Explicit | 1KB |
|
||||||
|
| **Period Cache** | `coordinator/periods.py` | Until data/config change | Hash-based | 10KB |
|
||||||
|
| **Transformation Cache** | `coordinator/data_transformation.py` | Until midnight/config | Automatic | 60KB |
|
||||||
|
|
||||||
|
**Total cache overhead:** ~126KB per coordinator instance (main entry + subentries)
|
||||||
|
|
||||||
|
### Cache Coordination
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart LR
|
||||||
|
USER[("User changes options")]
|
||||||
|
MIDNIGHT[("Midnight turnover")]
|
||||||
|
NEWDATA[("Tomorrow data arrives")]
|
||||||
|
|
||||||
|
USER -->|"Explicit invalidation"| CONFIG["Config Cache<br/>❌ Clear"]
|
||||||
|
USER -->|"Explicit invalidation"| PERIOD["Period Cache<br/>❌ Clear"]
|
||||||
|
USER -->|"Explicit invalidation"| TRANS["Transformation Cache<br/>❌ Clear"]
|
||||||
|
|
||||||
|
MIDNIGHT -->|"Date validation"| API["API Cache<br/>❌ Clear prices"]
|
||||||
|
MIDNIGHT -->|"Date check"| TRANS
|
||||||
|
|
||||||
|
NEWDATA -->|"Hash mismatch"| PERIOD
|
||||||
|
|
||||||
|
CONFIG -.->|"Next access"| CONFIG_NEW["Reparse options"]
|
||||||
|
PERIOD -.->|"Next access"| PERIOD_NEW["Recalculate"]
|
||||||
|
TRANS -.->|"Next access"| TRANS_NEW["Re-enrich"]
|
||||||
|
API -.->|"Next access"| API_NEW["Fetch from API"]
|
||||||
|
|
||||||
|
classDef invalid fill:#ffebee,stroke:#c62828,stroke-width:2px
|
||||||
|
classDef rebuild fill:#e8f5e9,stroke:#388e3c,stroke-width:2px
|
||||||
|
|
||||||
|
class CONFIG,PERIOD,TRANS,API invalid
|
||||||
|
class CONFIG_NEW,PERIOD_NEW,TRANS_NEW,API_NEW rebuild
|
||||||
|
```
|
||||||
|
|
||||||
|
**Key insight:** No cascading invalidations - each cache is independent and rebuilds on-demand.
|
||||||
|
|
||||||
|
For detailed cache behavior, see [Caching Strategy](./caching-strategy.md).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Component Responsibilities
|
||||||
|
|
||||||
|
### Core Components
|
||||||
|
|
||||||
|
| Component | File | Responsibility |
|
||||||
|
|-----------|------|----------------|
|
||||||
|
| **API Client** | `api.py` | GraphQL queries to Tibber, retry logic, error handling |
|
||||||
|
| **Coordinator** | `coordinator.py` | Update orchestration, cache management, absolute-time scheduling with boundary tolerance |
|
||||||
|
| **Data Transformer** | `coordinator/data_transformation.py` | Price enrichment (averages, ratings, differences) |
|
||||||
|
| **Period Calculator** | `coordinator/periods.py` | Best/peak price period calculation with relaxation |
|
||||||
|
| **Sensors** | `sensor/` | 80+ entities for prices, levels, ratings, statistics |
|
||||||
|
| **Binary Sensors** | `binary_sensor/` | Period indicators (best/peak price active) |
|
||||||
|
| **Services** | `services/` | Custom service endpoints (get_chartdata, get_apexcharts_yaml, refresh_user_data) |
|
||||||
|
|
||||||
|
### Sensor Architecture (Calculator Pattern)
|
||||||
|
|
||||||
|
The sensor platform uses **Calculator Pattern** for clean separation of concerns (refactored Nov 2025):
|
||||||
|
|
||||||
|
| Component | Files | Lines | Responsibility |
|
||||||
|
|-----------|-------|-------|----------------|
|
||||||
|
| **Entity Class** | `sensor/core.py` | 909 | Entity lifecycle, coordinator, delegates to calculators |
|
||||||
|
| **Calculators** | `sensor/calculators/` | 1,838 | Business logic (8 specialized calculators) |
|
||||||
|
| **Attributes** | `sensor/attributes/` | 1,209 | State presentation (8 specialized modules) |
|
||||||
|
| **Routing** | `sensor/value_getters.py` | 276 | Centralized sensor → calculator mapping |
|
||||||
|
| **Chart Export** | `sensor/chart_data.py` | 144 | Service call handling, YAML parsing |
|
||||||
|
| **Helpers** | `sensor/helpers.py` | 188 | Aggregation functions, utilities |
|
||||||
|
|
||||||
|
**Calculator Package** (`sensor/calculators/`):
|
||||||
|
- `base.py` - Abstract BaseCalculator with coordinator access
|
||||||
|
- `interval.py` - Single interval calculations (current/next/previous)
|
||||||
|
- `rolling_hour.py` - 5-interval rolling windows
|
||||||
|
- `daily_stat.py` - Calendar day min/max/avg statistics
|
||||||
|
- `window_24h.py` - Trailing/leading 24h windows
|
||||||
|
- `volatility.py` - Price volatility analysis
|
||||||
|
- `trend.py` - Complex trend analysis with caching
|
||||||
|
- `timing.py` - Best/peak price period timing
|
||||||
|
- `metadata.py` - Home/metering metadata
|
||||||
|
|
||||||
|
**Benefits:**
|
||||||
|
- 58% reduction in core.py (2,170 → 909 lines)
|
||||||
|
- Clear separation: Calculators (logic) vs Attributes (presentation)
|
||||||
|
- Independent testability for each calculator
|
||||||
|
- Easy to add sensors: Choose calculation pattern, add to routing
|
||||||
|
|
||||||
|
### Helper Utilities
|
||||||
|
|
||||||
|
| Utility | File | Purpose |
|
||||||
|
|---------|------|---------|
|
||||||
|
| **Price Utils** | `utils/price.py` | Rating calculation, enrichment, level aggregation |
|
||||||
|
| **Average Utils** | `utils/average.py` | Trailing/leading 24h average calculations |
|
||||||
|
| **Entity Utils** | `entity_utils/` | Shared icon/color/attribute logic |
|
||||||
|
| **Translations** | `const.py` | Translation loading and caching |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Key Patterns
|
||||||
|
|
||||||
|
### 1. Dual Translation System
|
||||||
|
|
||||||
|
- **Standard translations** (`/translations/*.json`): HA-compliant schema for entity names
|
||||||
|
- **Custom translations** (`/custom_translations/*.json`): Extended descriptions, usage tips
|
||||||
|
- Both loaded at integration setup, cached in memory
|
||||||
|
- Access via `get_translation()` helper function
|
||||||
|
|
||||||
|
### 2. Price Data Enrichment
|
||||||
|
|
||||||
|
All quarter-hourly price intervals get augmented via `utils/price.py`:
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Original from Tibber API
|
||||||
|
{
|
||||||
|
"startsAt": "2025-11-03T14:00:00+01:00",
|
||||||
|
"total": 0.2534,
|
||||||
|
"level": "NORMAL"
|
||||||
|
}
|
||||||
|
|
||||||
|
# After enrichment (utils/price.py)
|
||||||
|
{
|
||||||
|
"startsAt": "2025-11-03T14:00:00+01:00",
|
||||||
|
"total": 0.2534,
|
||||||
|
"level": "NORMAL",
|
||||||
|
"trailing_avg_24h": 0.2312, # ← Added: 24h trailing average
|
||||||
|
"difference": 9.6, # ← Added: % diff from trailing avg
|
||||||
|
"rating_level": "NORMAL" # ← Added: LOW/NORMAL/HIGH based on thresholds
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Quarter-Hour Precision
|
||||||
|
|
||||||
|
- **API polling**: Every 15 minutes (coordinator fetch cycle)
|
||||||
|
- **Entity updates**: On 00/15/30/45-minute boundaries via `coordinator/listeners.py`
|
||||||
|
- **Timer scheduling**: Uses `async_track_utc_time_change(minute=[0, 15, 30, 45], second=0)`
|
||||||
|
- HA may trigger ±few milliseconds before/after exact boundary
|
||||||
|
- Smart boundary tolerance (±2 seconds) handles scheduling jitter in `sensor/helpers.py`
|
||||||
|
- If HA schedules at 14:59:58 → rounds to 15:00:00 (shows new interval data)
|
||||||
|
- If HA restarts at 14:59:30 → stays at 14:45:00 (shows current interval data)
|
||||||
|
- **Absolute time tracking**: Timer plans for **all future boundaries** (not relative delays)
|
||||||
|
- Prevents double-updates (if triggered at 14:59:58, next trigger is 15:15:00, not 15:00:00)
|
||||||
|
- **Result**: Current price sensors update without waiting for next API poll
|
||||||
|
|
||||||
|
### 4. Calculator Pattern (Sensor Platform)
|
||||||
|
|
||||||
|
Sensors organized by **calculation method** (refactored Nov 2025):
|
||||||
|
|
||||||
|
**Unified Handler Methods** (`sensor/core.py`):
|
||||||
|
- `_get_interval_value(offset, type)` - current/next/previous intervals
|
||||||
|
- `_get_rolling_hour_value(offset, type)` - 5-interval rolling windows
|
||||||
|
- `_get_daily_stat_value(day, stat_func)` - calendar day min/max/avg
|
||||||
|
- `_get_24h_window_value(stat_func)` - trailing/leading statistics
|
||||||
|
|
||||||
|
**Routing** (`sensor/value_getters.py`):
|
||||||
|
- Single source of truth mapping 80+ entity keys to calculator methods
|
||||||
|
- Organized by calculation type (Interval, Rolling Hour, Daily Stats, etc.)
|
||||||
|
|
||||||
|
**Calculators** (`sensor/calculators/`):
|
||||||
|
- Each calculator inherits from `BaseCalculator` with coordinator access
|
||||||
|
- Focused responsibility: `IntervalCalculator`, `TrendCalculator`, etc.
|
||||||
|
- Complex logic isolated (e.g., `TrendCalculator` has internal caching)
|
||||||
|
|
||||||
|
**Attributes** (`sensor/attributes/`):
|
||||||
|
- Separate from business logic, handles state presentation
|
||||||
|
- Builds extra_state_attributes dicts for entity classes
|
||||||
|
- Unified builders: `build_sensor_attributes()`, `build_extra_state_attributes()`
|
||||||
|
|
||||||
|
**Benefits:**
|
||||||
|
- Minimal code duplication across 80+ sensors
|
||||||
|
- Clear separation of concerns (calculation vs presentation)
|
||||||
|
- Easy to extend: Add sensor → choose pattern → add to routing
|
||||||
|
- Independent testability for each component
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Performance Characteristics
|
||||||
|
|
||||||
|
### API Call Reduction
|
||||||
|
|
||||||
|
- **Without caching:** 96 API calls/day (every 15 min)
|
||||||
|
- **With caching:** ~1-2 API calls/day (only when cache expires)
|
||||||
|
- **Reduction:** ~98%
|
||||||
|
|
||||||
|
### CPU Optimization
|
||||||
|
|
||||||
|
| Optimization | Location | Savings |
|
||||||
|
|--------------|----------|---------|
|
||||||
|
| Config caching | `coordinator/*` | ~50% on config checks |
|
||||||
|
| Period caching | `coordinator/periods.py` | ~70% on period recalculation |
|
||||||
|
| Lazy logging | Throughout | ~15% on log-heavy operations |
|
||||||
|
| Import optimization | Module structure | ~20% faster loading |
|
||||||
|
|
||||||
|
### Memory Usage
|
||||||
|
|
||||||
|
- **Per coordinator instance:** ~126KB cache overhead
|
||||||
|
- **Typical setup:** 1 main + 2 subentries = ~378KB total
|
||||||
|
- **Redundancy eliminated:** 14% reduction (10KB saved per coordinator)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Related Documentation
|
||||||
|
|
||||||
|
- **[Timer Architecture](./timer-architecture.md)** - Timer system, scheduling, coordination (3 independent timers)
|
||||||
|
- **[Caching Strategy](./caching-strategy.md)** - Detailed cache behavior, invalidation, debugging
|
||||||
|
- **[Setup Guide](./setup.md)** - Development environment setup
|
||||||
|
- **[Testing Guide](./testing.md)** - How to test changes
|
||||||
|
- **[Release Management](./release-management.md)** - Release workflow and versioning
|
||||||
|
- **[AGENTS.md](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.24.0/AGENTS.md)** - Complete reference for AI development
|
||||||
|
|
@ -0,0 +1,447 @@
|
||||||
|
---
|
||||||
|
comments: false
|
||||||
|
---
|
||||||
|
|
||||||
|
# Caching Strategy
|
||||||
|
|
||||||
|
This document explains all caching mechanisms in the Tibber Prices integration, their purpose, invalidation logic, and lifetime.
|
||||||
|
|
||||||
|
For timer coordination and scheduling details, see [Timer Architecture](./timer-architecture.md).
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The integration uses **5 distinct caching layers** with different purposes and lifetimes:

1. **Persistent API Data Cache** (HA Storage) - Hours to days
2. **Translation Cache** (Memory) - Forever (until HA restart)
3. **Config Dictionary Cache** (Memory) - Until config changes
4. **Period Calculation Cache** (Memory) - Until price data or config changes
5. **Transformation Cache** (Memory) - Until midnight turnover or config changes
|
||||||
|
|
||||||
|
## 1. Persistent API Data Cache
|
||||||
|
|
||||||
|
**Location:** `coordinator/cache.py` → HA Storage (`.storage/tibber_prices.<entry_id>`)
|
||||||
|
|
||||||
|
**Purpose:** Reduce API calls to Tibber by caching user data and price data between HA restarts.
|
||||||
|
|
||||||
|
**What is cached:**
|
||||||
|
- **Price data** (`price_data`): Day before yesterday/yesterday/today/tomorrow price intervals with enriched fields (384 intervals total)
|
||||||
|
- **User data** (`user_data`): Homes, subscriptions, features from Tibber GraphQL `viewer` query
|
||||||
|
- **Timestamps**: Last update times for validation
|
||||||
|
|
||||||
|
**Lifetime:**
|
||||||
|
- **Price data**: Until midnight turnover (cleared daily at 00:00 local time)
|
||||||
|
- **User data**: 24 hours (refreshed daily)
|
||||||
|
- **Survives**: HA restarts via persistent Storage
|
||||||
|
|
||||||
|
**Invalidation triggers:**
|
||||||
|
|
||||||
|
1. **Midnight turnover** (Timer #2 in coordinator):
|
||||||
|
```python
|
||||||
|
# coordinator/day_transitions.py
|
||||||
|
def _handle_midnight_turnover() -> None:
|
||||||
|
self._cached_price_data = None # Force fresh fetch for new day
|
||||||
|
self._last_price_update = None
|
||||||
|
await self.store_cache()
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Cache validation on load**:
|
||||||
|
```python
|
||||||
|
# coordinator/cache.py
|
||||||
|
def is_cache_valid(cache_data: CacheData) -> bool:
|
||||||
|
# Checks if price data is from a previous day
|
||||||
|
if today_date < local_now.date(): # Yesterday's data
|
||||||
|
return False
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Tomorrow data check** (after 13:00):
|
||||||
|
```python
|
||||||
|
# coordinator/data_fetching.py
|
||||||
|
if tomorrow_missing or tomorrow_invalid:
|
||||||
|
return "tomorrow_check" # Update needed
|
||||||
|
```
|
||||||
|
|
||||||
|
**Why this cache matters:** Reduces API load on Tibber (~192 intervals per fetch), speeds up HA restarts, enables offline operation until cache expires.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2. Translation Cache
|
||||||
|
|
||||||
|
**Location:** `const.py` → `_TRANSLATIONS_CACHE` and `_STANDARD_TRANSLATIONS_CACHE` (in-memory dicts)
|
||||||
|
|
||||||
|
**Purpose:** Avoid repeated file I/O when accessing entity descriptions, UI strings, etc.
|
||||||
|
|
||||||
|
**What is cached:**
|
||||||
|
- **Standard translations** (`/translations/*.json`): Config flow, selector options, entity names
|
||||||
|
- **Custom translations** (`/custom_translations/*.json`): Entity descriptions, usage tips, long descriptions
|
||||||
|
|
||||||
|
**Lifetime:**
|
||||||
|
- **Forever** (until HA restart)
|
||||||
|
- No invalidation during runtime
|
||||||
|
|
||||||
|
**When populated:**
|
||||||
|
- At integration setup: `async_load_translations(hass, "en")` in `__init__.py`
|
||||||
|
- Lazy loading: If translation missing, attempts file load once
|
||||||
|
|
||||||
|
**Access pattern:**
|
||||||
|
```python
|
||||||
|
# Non-blocking synchronous access from cached data
|
||||||
|
description = get_translation("binary_sensor.best_price_period.description", "en")
|
||||||
|
```
|
||||||
|
|
||||||
|
**Why this cache matters:** Entity attributes are accessed on every state update (~15 times per hour per entity). File I/O would block the event loop. Cache enables synchronous, non-blocking attribute generation.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3. Config Dictionary Cache
|
||||||
|
|
||||||
|
**Location:** `coordinator/data_transformation.py` and `coordinator/periods.py` (per-instance fields)
|
||||||
|
|
||||||
|
**Purpose:** Avoid ~30-40 `options.get()` calls on every coordinator update (every 15 minutes).
|
||||||
|
|
||||||
|
**What is cached:**
|
||||||
|
|
||||||
|
### DataTransformer Config Cache
|
||||||
|
```python
|
||||||
|
{
|
||||||
|
"thresholds": {"low": 15, "high": 35},
|
||||||
|
"volatility_thresholds": {"moderate": 15.0, "high": 25.0, "very_high": 40.0},
|
||||||
|
# ... 20+ more config fields
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### PeriodCalculator Config Cache
|
||||||
|
```python
|
||||||
|
{
|
||||||
|
"best": {"flex": 0.15, "min_distance_from_avg": 5.0, "min_period_length": 60},
|
||||||
|
"peak": {"flex": 0.15, "min_distance_from_avg": 5.0, "min_period_length": 60}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Lifetime:**
|
||||||
|
- Until `invalidate_config_cache()` is called
|
||||||
|
- Built once on first use per coordinator update cycle
|
||||||
|
|
||||||
|
**Invalidation trigger:**
|
||||||
|
- **Options change** (user reconfigures integration):
|
||||||
|
```python
|
||||||
|
# coordinator/core.py
|
||||||
|
async def _handle_options_update(...) -> None:
|
||||||
|
self._data_transformer.invalidate_config_cache()
|
||||||
|
self._period_calculator.invalidate_config_cache()
|
||||||
|
await self.async_request_refresh()
|
||||||
|
```
|
||||||
|
|
||||||
|
**Performance impact:**
|
||||||
|
- **Before:** ~30 dict lookups + type conversions per update = ~50μs
|
||||||
|
- **After:** 1 cache check = ~1μs
|
||||||
|
- **Savings:** ~98% (50μs → 1μs per update)
|
||||||
|
|
||||||
|
**Why this cache matters:** Config is read multiple times per update (transformation + period calculation + validation). Caching eliminates redundant lookups without changing behavior.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4. Period Calculation Cache
|
||||||
|
|
||||||
|
**Location:** `coordinator/periods.py` → `PeriodCalculator._cached_periods`
|
||||||
|
|
||||||
|
**Purpose:** Avoid expensive period calculations (~100-500ms) when price data and config haven't changed.
|
||||||
|
|
||||||
|
**What is cached:**
|
||||||
|
```python
|
||||||
|
{
|
||||||
|
"best_price": {
|
||||||
|
"periods": [...], # Calculated period objects
|
||||||
|
"intervals": [...], # All intervals in periods
|
||||||
|
"metadata": {...} # Config snapshot
|
||||||
|
},
|
||||||
|
"best_price_relaxation": {"relaxation_active": bool, ...},
|
||||||
|
"peak_price": {...},
|
||||||
|
"peak_price_relaxation": {...}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Cache key:** Hash of relevant inputs
|
||||||
|
```python
|
||||||
|
hash_data = (
|
||||||
|
today_signature, # (startsAt, rating_level) for each interval
|
||||||
|
tuple(best_config.items()), # Best price config
|
||||||
|
tuple(peak_config.items()), # Peak price config
|
||||||
|
best_level_filter, # Level filter overrides
|
||||||
|
peak_level_filter
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Lifetime:**
|
||||||
|
- Until price data changes (today's intervals modified)
|
||||||
|
- Until config changes (flex, thresholds, filters)
|
||||||
|
- Recalculated at midnight (new today data)
|
||||||
|
|
||||||
|
**Invalidation triggers:**
|
||||||
|
|
||||||
|
1. **Config change** (explicit):
|
||||||
|
```python
|
||||||
|
def invalidate_config_cache() -> None:
|
||||||
|
self._cached_periods = None
|
||||||
|
self._last_periods_hash = None
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Price data change** (automatic via hash mismatch):
|
||||||
|
```python
|
||||||
|
current_hash = self._compute_periods_hash(price_info)
|
||||||
|
if self._last_periods_hash != current_hash:
|
||||||
|
# Cache miss - recalculate
|
||||||
|
```
|
||||||
|
|
||||||
|
**Cache hit rate:**
|
||||||
|
- **High:** During normal operation (coordinator updates every 15min, price data unchanged)
|
||||||
|
- **Low:** After midnight (new today data) or when tomorrow data arrives (~13:00-14:00)
|
||||||
|
|
||||||
|
**Performance impact:**
|
||||||
|
- **Period calculation:** ~100-500ms (depends on interval count, relaxation attempts)
|
||||||
|
- **Cache hit:** `<`1ms (hash comparison + dict lookup)
|
||||||
|
- **Savings:** ~70% of calculation time (most updates hit cache)
|
||||||
|
|
||||||
|
**Why this cache matters:** Period calculation is CPU-intensive (filtering, gap tolerance, relaxation). Caching avoids recalculating unchanged periods 3-4 times per hour.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5. Transformation Cache (Price Enrichment Only)
|
||||||
|
|
||||||
|
**Location:** `coordinator/data_transformation.py` → `_cached_transformed_data`
|
||||||
|
|
||||||
|
**Status:** ✅ **Clean separation** - enrichment only, no redundancy
|
||||||
|
|
||||||
|
**What is cached:**
|
||||||
|
```python
|
||||||
|
{
|
||||||
|
"timestamp": ...,
|
||||||
|
"homes": {...},
|
||||||
|
"priceInfo": {...}, # Enriched price data (trailing_avg_24h, difference, rating_level)
|
||||||
|
# NO periods - periods are exclusively managed by PeriodCalculator
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Purpose:** Avoid re-enriching price data when config unchanged between midnight checks.
|
||||||
|
|
||||||
|
**Current behavior:**
|
||||||
|
- Caches **only enriched price data** (price + statistics)
|
||||||
|
- **Does NOT cache periods** (handled by Period Calculation Cache)
|
||||||
|
- Invalidated when:
|
||||||
|
- Config changes (thresholds affect enrichment)
|
||||||
|
- Midnight turnover detected
|
||||||
|
- New update cycle begins
|
||||||
|
|
||||||
|
**Architecture:**
|
||||||
|
- DataTransformer: Handles price enrichment only
|
||||||
|
- PeriodCalculator: Handles period calculation only (with hash-based cache)
|
||||||
|
- Coordinator: Assembles final data on-demand from both caches
|
||||||
|
|
||||||
|
**Memory savings:** Eliminating redundant period storage saves ~10KB per coordinator (14% reduction).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Cache Invalidation Flow
|
||||||
|
|
||||||
|
### User Changes Options (Config Flow)
|
||||||
|
```
|
||||||
|
User saves options
|
||||||
|
↓
|
||||||
|
config_entry.add_update_listener() triggers
|
||||||
|
↓
|
||||||
|
coordinator._handle_options_update()
|
||||||
|
↓
|
||||||
|
├─> DataTransformer.invalidate_config_cache()
|
||||||
|
│ └─> _config_cache = None
|
||||||
|
│ _config_cache_valid = False
|
||||||
|
│ _cached_transformed_data = None
|
||||||
|
│
|
||||||
|
└─> PeriodCalculator.invalidate_config_cache()
|
||||||
|
└─> _config_cache = None
|
||||||
|
_config_cache_valid = False
|
||||||
|
_cached_periods = None
|
||||||
|
_last_periods_hash = None
|
||||||
|
↓
|
||||||
|
coordinator.async_request_refresh()
|
||||||
|
↓
|
||||||
|
Fresh data fetch with new config
|
||||||
|
```
|
||||||
|
|
||||||
|
### Midnight Turnover (Day Transition)
|
||||||
|
```
|
||||||
|
Timer #2 fires at 00:00
|
||||||
|
↓
|
||||||
|
coordinator._handle_midnight_turnover()
|
||||||
|
↓
|
||||||
|
├─> Clear persistent cache
|
||||||
|
│ └─> _cached_price_data = None
|
||||||
|
│ _last_price_update = None
|
||||||
|
│
|
||||||
|
└─> Clear transformation cache
|
||||||
|
└─> _cached_transformed_data = None
|
||||||
|
_last_transformation_config = None
|
||||||
|
↓
|
||||||
|
Period cache auto-invalidates (hash mismatch on new "today")
|
||||||
|
↓
|
||||||
|
Fresh API fetch for new day
|
||||||
|
```
|
||||||
|
|
||||||
|
### Tomorrow Data Arrives (~13:00)
|
||||||
|
```
|
||||||
|
Coordinator update cycle
|
||||||
|
↓
|
||||||
|
should_update_price_data() checks tomorrow
|
||||||
|
↓
|
||||||
|
Tomorrow data missing/invalid
|
||||||
|
↓
|
||||||
|
API fetch with new tomorrow data
|
||||||
|
↓
|
||||||
|
Price data hash changes (new intervals)
|
||||||
|
↓
|
||||||
|
Period cache auto-invalidates (hash mismatch)
|
||||||
|
↓
|
||||||
|
Periods recalculated with tomorrow included
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Cache Coordination
|
||||||
|
|
||||||
|
**All caches work together:**
|
||||||
|
|
||||||
|
```
|
||||||
|
Persistent Storage (HA restart)
|
||||||
|
↓
|
||||||
|
API Data Cache (price_data, user_data)
|
||||||
|
↓
|
||||||
|
├─> Enrichment (add rating_level, difference, etc.)
|
||||||
|
│ ↓
|
||||||
|
│ Transformation Cache (_cached_transformed_data)
|
||||||
|
│
|
||||||
|
└─> Period Calculation
|
||||||
|
↓
|
||||||
|
Period Cache (_cached_periods)
|
||||||
|
↓
|
||||||
|
Config Cache (avoid re-reading options)
|
||||||
|
↓
|
||||||
|
Translation Cache (entity descriptions)
|
||||||
|
```
|
||||||
|
|
||||||
|
**No cache invalidation cascades:**
|
||||||
|
- Config cache invalidation is **explicit** (on options update)
|
||||||
|
- Period cache invalidation is **automatic** (via hash mismatch)
|
||||||
|
- Transformation cache invalidation is **automatic** (on midnight/config change)
|
||||||
|
- Translation cache is **never invalidated** (read-only after load)
|
||||||
|
|
||||||
|
**Thread safety:**
|
||||||
|
- All caches are accessed from `MainThread` only (Home Assistant event loop)
|
||||||
|
- No locking needed (single-threaded execution model)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Performance Characteristics
|
||||||
|
|
||||||
|
### Typical Operation (No Changes)
|
||||||
|
```
|
||||||
|
Coordinator Update (every 15 min)
|
||||||
|
├─> API fetch: SKIP (cache valid)
|
||||||
|
├─> Config dict build: ~1μs (cached)
|
||||||
|
├─> Period calculation: ~1ms (cached, hash match)
|
||||||
|
├─> Transformation: ~10ms (enrichment only, periods cached)
|
||||||
|
└─> Entity updates: ~5ms (translation cache hit)
|
||||||
|
|
||||||
|
Total: ~16ms (down from ~600ms without caching)
|
||||||
|
```
|
||||||
|
|
||||||
|
### After Midnight Turnover
|
||||||
|
```
|
||||||
|
Coordinator Update (00:00)
|
||||||
|
├─> API fetch: ~500ms (cache cleared, fetch new day)
|
||||||
|
├─> Config dict build: ~50μs (rebuild, no cache)
|
||||||
|
├─> Period calculation: ~200ms (cache miss, recalculate)
|
||||||
|
├─> Transformation: ~50ms (re-enrich, rebuild)
|
||||||
|
└─> Entity updates: ~5ms (translation cache still valid)
|
||||||
|
|
||||||
|
Total: ~755ms (expected once per day)
|
||||||
|
```
|
||||||
|
|
||||||
|
### After Config Change
|
||||||
|
```
|
||||||
|
Options Update
|
||||||
|
├─> Cache invalidation: `<`1ms
|
||||||
|
├─> Coordinator refresh: ~600ms
|
||||||
|
│ ├─> API fetch: SKIP (data unchanged)
|
||||||
|
│ ├─> Config rebuild: ~50μs
|
||||||
|
│ ├─> Period recalculation: ~200ms (new thresholds)
|
||||||
|
│ ├─> Re-enrichment: ~50ms
|
||||||
|
│ └─> Entity updates: ~5ms
|
||||||
|
└─> Total: ~600ms (expected on manual reconfiguration)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Summary Table
|
||||||
|
|
||||||
|
| Cache Type | Lifetime | Size | Invalidation | Purpose |
|
||||||
|
|------------|----------|------|--------------|---------|
|
||||||
|
| **API Data** | Hours to 1 day | ~50KB | Midnight, validation | Reduce API calls |
|
||||||
|
| **Translations** | Forever (until HA restart) | ~5KB | Never | Avoid file I/O |
|
||||||
|
| **Config Dicts** | Until options change | `<`1KB | Explicit (options update) | Avoid dict lookups |
|
||||||
|
| **Period Calculation** | Until data/config change | ~10KB | Auto (hash mismatch) | Avoid CPU-intensive calculation |
|
||||||
|
| **Transformation** | Until midnight/config change | ~50KB | Auto (midnight/config) | Avoid re-enrichment |
|
||||||
|
|
||||||
|
**Total memory overhead:** ~116KB per coordinator instance (main + subentries)
|
||||||
|
|
||||||
|
**Benefits:**
|
||||||
|
- 97% reduction in API calls (from every 15min to once per day)
|
||||||
|
- 70% reduction in period calculation time (cache hits during normal operation)
|
||||||
|
- 98% reduction in config access time (30+ lookups → 1 cache check)
|
||||||
|
- Zero file I/O during runtime (translations cached at startup)
|
||||||
|
|
||||||
|
**Trade-offs:**
|
||||||
|
- Memory usage: ~116KB per home (negligible for modern systems)
|
||||||
|
- Code complexity: 5 cache invalidation points (well-tested, documented)
|
||||||
|
- Debugging: Must understand cache lifetime when investigating stale data issues
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Debugging Cache Issues
|
||||||
|
|
||||||
|
### Symptom: Stale data after config change
|
||||||
|
**Check:**
|
||||||
|
1. Is `_handle_options_update()` called? (should see "Options updated" log)
|
||||||
|
2. Are `invalidate_config_cache()` methods executed?
|
||||||
|
3. Does `async_request_refresh()` trigger?
|
||||||
|
|
||||||
|
**Fix:** Ensure `config_entry.add_update_listener()` is registered in coordinator init.
|
||||||
|
|
||||||
|
### Symptom: Period calculation not updating
|
||||||
|
**Check:**
|
||||||
|
1. Verify hash changes when data changes: `_compute_periods_hash()`
|
||||||
|
2. Check `_last_periods_hash` vs `current_hash`
|
||||||
|
3. Look for "Using cached period calculation" vs "Calculating periods" logs
|
||||||
|
|
||||||
|
**Fix:** Hash function may not include all relevant data. Review `_compute_periods_hash()` inputs.
|
||||||
|
|
||||||
|
### Symptom: Yesterday's prices shown as today
|
||||||
|
**Check:**
|
||||||
|
1. `is_cache_valid()` logic in `coordinator/cache.py`
|
||||||
|
2. Midnight turnover execution (Timer #2)
|
||||||
|
3. Cache clear confirmation in logs
|
||||||
|
|
||||||
|
**Fix:** Timer may not be firing. Check `_schedule_midnight_turnover()` registration.
|
||||||
|
|
||||||
|
### Symptom: Missing translations
|
||||||
|
**Check:**
|
||||||
|
1. `async_load_translations()` called at startup?
|
||||||
|
2. Translation files exist in `/translations/` and `/custom_translations/`?
|
||||||
|
3. Cache population: `_TRANSLATIONS_CACHE` keys
|
||||||
|
|
||||||
|
**Fix:** Re-install integration or restart HA to reload translation files.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Related Documentation
|
||||||
|
|
||||||
|
- **[Timer Architecture](./timer-architecture.md)** - Timer system, scheduling, midnight coordination
|
||||||
|
- **[Architecture](./architecture.md)** - Overall system design, data flow
|
||||||
|
- **[AGENTS.md](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.24.0/AGENTS.md)** - Complete reference for AI development
|
||||||
|
|
@ -0,0 +1,121 @@
|
||||||
|
---
|
||||||
|
comments: false
|
||||||
|
---
|
||||||
|
|
||||||
|
# Coding Guidelines
|
||||||
|
|
||||||
|
> **Note:** For complete coding standards, see [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.24.0/AGENTS.md).
|
||||||
|
|
||||||
|
## Code Style
|
||||||
|
|
||||||
|
- **Formatter/Linter**: Ruff (replaces Black, Flake8, isort)
|
||||||
|
- **Max line length**: 120 characters
|
||||||
|
- **Max complexity**: 25 (McCabe)
|
||||||
|
- **Target**: Python 3.13
|
||||||
|
|
||||||
|
Run before committing:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
./scripts/lint # Auto-fix issues
|
||||||
|
./scripts/release/hassfest # Validate integration structure
|
||||||
|
```
|
||||||
|
|
||||||
|
## Naming Conventions
|
||||||
|
|
||||||
|
### Class Names
|
||||||
|
|
||||||
|
**All public classes MUST use the integration name as prefix.**
|
||||||
|
|
||||||
|
This is a Home Assistant standard to avoid naming conflicts between integrations.
|
||||||
|
|
||||||
|
```python
|
||||||
|
# ✅ CORRECT
|
||||||
|
class TibberPricesApiClient:
|
||||||
|
class TibberPricesDataUpdateCoordinator:
|
||||||
|
class TibberPricesSensor:
|
||||||
|
|
||||||
|
# ❌ WRONG - Missing prefix
|
||||||
|
class ApiClient:
|
||||||
|
class DataFetcher:
|
||||||
|
class TimeService:
|
||||||
|
```
|
||||||
|
|
||||||
|
**When prefix is required:**
|
||||||
|
- Public classes used across multiple modules
|
||||||
|
- All exception classes
|
||||||
|
- All coordinator and entity classes
|
||||||
|
- Data classes (dataclasses, NamedTuples) used as public APIs
|
||||||
|
|
||||||
|
**When prefix can be omitted:**
|
||||||
|
- Private helper classes within a single module (prefix with `_` underscore)
|
||||||
|
- Type aliases and callbacks (e.g., `TimeServiceCallback`)
|
||||||
|
- Small internal NamedTuples for function returns
|
||||||
|
|
||||||
|
**Private Classes:**
|
||||||
|
|
||||||
|
If a helper class is ONLY used within a single module file, prefix it with underscore:
|
||||||
|
|
||||||
|
```python
|
||||||
|
# ✅ Private class - used only in this file
|
||||||
|
class _InternalHelper:
|
||||||
|
"""Helper used only within this module."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
# ❌ Wrong - no prefix but used across modules
|
||||||
|
class DataFetcher: # Should be TibberPricesDataFetcher
|
||||||
|
pass
|
||||||
|
```
|
||||||
|
|
||||||
|
**Note:** Currently (Nov 2025), this project has **NO private classes** - all classes are used across module boundaries.
|
||||||
|
|
||||||
|
**Current Technical Debt:**
|
||||||
|
|
||||||
|
Many existing classes lack the `TibberPrices` prefix. Before refactoring:
|
||||||
|
1. Document the plan in `/planning/class-naming-refactoring.md`
|
||||||
|
2. Use `multi_replace_string_in_file` for bulk renames
|
||||||
|
3. Test thoroughly after each module
|
||||||
|
|
||||||
|
See [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.24.0/AGENTS.md) for complete list of classes needing rename.
|
||||||
|
|
||||||
|
## Import Order
|
||||||
|
|
||||||
|
1. Python stdlib (specific types only)
|
||||||
|
2. Third-party (`homeassistant.*`, `aiohttp`)
|
||||||
|
3. Local (`.api`, `.const`)
|
||||||
|
|
||||||
|
## Critical Patterns
|
||||||
|
|
||||||
|
### Time Handling
|
||||||
|
|
||||||
|
Always use `dt_util` from `homeassistant.util`:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from homeassistant.util import dt as dt_util
|
||||||
|
|
||||||
|
price_time = dt_util.parse_datetime(starts_at)
|
||||||
|
price_time = dt_util.as_local(price_time) # Convert to HA timezone
|
||||||
|
now = dt_util.now()
|
||||||
|
```
|
||||||
|
|
||||||
|
### Translation Loading
|
||||||
|
|
||||||
|
```python
|
||||||
|
# In __init__.py async_setup_entry:
|
||||||
|
await async_load_translations(hass, "en")
|
||||||
|
await async_load_standard_translations(hass, "en")
|
||||||
|
```
|
||||||
|
|
||||||
|
### Price Data Enrichment
|
||||||
|
|
||||||
|
Always enrich raw API data:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from .price_utils import enrich_price_info_with_differences
|
||||||
|
|
||||||
|
enriched = enrich_price_info_with_differences(
|
||||||
|
price_info_data,
|
||||||
|
thresholds,
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
See [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.24.0/AGENTS.md) for complete guidelines.
|
||||||
216
docs/developer/versioned_docs/version-v0.24.0/contributing.md
Normal file
216
docs/developer/versioned_docs/version-v0.24.0/contributing.md
Normal file
|
|
@ -0,0 +1,216 @@
|
||||||
|
# Contributing Guide
|
||||||
|
|
||||||
|
Welcome! This guide helps you contribute to the Tibber Prices integration.
|
||||||
|
|
||||||
|
## Getting Started
|
||||||
|
|
||||||
|
### Prerequisites
|
||||||
|
|
||||||
|
- Git
|
||||||
|
- VS Code with Remote Containers extension
|
||||||
|
- Docker Desktop
|
||||||
|
|
||||||
|
### Fork and Clone
|
||||||
|
|
||||||
|
1. Fork the repository on GitHub
|
||||||
|
2. Clone your fork:
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/YOUR_USERNAME/hass.tibber_prices.git
|
||||||
|
cd hass.tibber_prices
|
||||||
|
```
|
||||||
|
3. Open in VS Code
|
||||||
|
4. Click "Reopen in Container" when prompted
|
||||||
|
|
||||||
|
The DevContainer will set up everything automatically.
|
||||||
|
|
||||||
|
## Development Workflow
|
||||||
|
|
||||||
|
### 1. Create a Branch
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git checkout -b feature/your-feature-name
|
||||||
|
# or
|
||||||
|
git checkout -b fix/issue-123-description
|
||||||
|
```
|
||||||
|
|
||||||
|
**Branch naming:**
|
||||||
|
- `feature/` - New features
|
||||||
|
- `fix/` - Bug fixes
|
||||||
|
- `docs/` - Documentation only
|
||||||
|
- `refactor/` - Code restructuring
|
||||||
|
- `test/` - Test improvements
|
||||||
|
|
||||||
|
### 2. Make Changes
|
||||||
|
|
||||||
|
Edit code, following [Coding Guidelines](coding-guidelines.md).
|
||||||
|
|
||||||
|
**Run checks frequently:**
|
||||||
|
```bash
|
||||||
|
./scripts/type-check # Pyright type checking
|
||||||
|
./scripts/lint # Ruff linting (auto-fix)
|
||||||
|
./scripts/test # Run tests
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Test Locally
|
||||||
|
|
||||||
|
```bash
|
||||||
|
./scripts/develop # Start HA with integration loaded
|
||||||
|
```
|
||||||
|
|
||||||
|
Access at http://localhost:8123
|
||||||
|
|
||||||
|
### 4. Write Tests
|
||||||
|
|
||||||
|
Add tests in `/tests/` for new features:
|
||||||
|
|
||||||
|
```python
|
||||||
|
@pytest.mark.unit
|
||||||
|
async def test_your_feature(hass, coordinator):
|
||||||
|
"""Test your new feature."""
|
||||||
|
# Arrange
|
||||||
|
coordinator.data = {...}
|
||||||
|
|
||||||
|
# Act
|
||||||
|
result = your_function(coordinator.data)
|
||||||
|
|
||||||
|
# Assert
|
||||||
|
assert result == expected_value
|
||||||
|
```
|
||||||
|
|
||||||
|
Run your test:
|
||||||
|
```bash
|
||||||
|
./scripts/test tests/test_your_feature.py -v
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. Commit Changes
|
||||||
|
|
||||||
|
Follow [Conventional Commits](https://www.conventionalcommits.org/):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git add .
|
||||||
|
git commit -m "feat(sensors): add volatility trend sensor
|
||||||
|
|
||||||
|
Add new sensor showing 3-hour volatility trend direction.
|
||||||
|
Includes attributes with historical volatility data.
|
||||||
|
|
||||||
|
Impact: Users can predict when prices will stabilize or continue fluctuating."
|
||||||
|
```
|
||||||
|
|
||||||
|
**Commit types:**
|
||||||
|
- `feat:` - New feature
|
||||||
|
- `fix:` - Bug fix
|
||||||
|
- `docs:` - Documentation
|
||||||
|
- `refactor:` - Code restructuring
|
||||||
|
- `test:` - Test changes
|
||||||
|
- `chore:` - Maintenance
|
||||||
|
|
||||||
|
**Add scope when relevant:**
|
||||||
|
- `feat(sensors):` - Sensor platform
|
||||||
|
- `fix(coordinator):` - Data coordinator
|
||||||
|
- `docs(user):` - User documentation
|
||||||
|
|
||||||
|
### 6. Push and Create PR
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git push origin your-branch-name
|
||||||
|
```
|
||||||
|
|
||||||
|
Then open Pull Request on GitHub.
|
||||||
|
|
||||||
|
## Pull Request Guidelines
|
||||||
|
|
||||||
|
### PR Template
|
||||||
|
|
||||||
|
Title: Short, descriptive (50 chars max)
|
||||||
|
|
||||||
|
Description should include:
|
||||||
|
```markdown
|
||||||
|
## What
|
||||||
|
Brief description of changes
|
||||||
|
|
||||||
|
## Why
|
||||||
|
Problem being solved or feature rationale
|
||||||
|
|
||||||
|
## How
|
||||||
|
Implementation approach
|
||||||
|
|
||||||
|
## Testing
|
||||||
|
- [ ] Manual testing in Home Assistant
|
||||||
|
- [ ] Unit tests added/updated
|
||||||
|
- [ ] Type checking passes
|
||||||
|
- [ ] Linting passes
|
||||||
|
|
||||||
|
## Breaking Changes
|
||||||
|
(If any - describe migration path)
|
||||||
|
|
||||||
|
## Related Issues
|
||||||
|
Closes #123
|
||||||
|
```
|
||||||
|
|
||||||
|
### PR Checklist
|
||||||
|
|
||||||
|
Before submitting:
|
||||||
|
- [ ] Code follows [Coding Guidelines](coding-guidelines.md)
|
||||||
|
- [ ] All tests pass (`./scripts/test`)
|
||||||
|
- [ ] Type checking passes (`./scripts/type-check`)
|
||||||
|
- [ ] Linting passes (`./scripts/lint-check`)
|
||||||
|
- [ ] Documentation updated (if needed)
|
||||||
|
- [ ] AGENTS.md updated (if patterns changed)
|
||||||
|
- [ ] Commit messages follow Conventional Commits
|
||||||
|
|
||||||
|
### Review Process
|
||||||
|
|
||||||
|
1. **Automated checks** run (CI/CD)
|
||||||
|
2. **Maintainer review** (usually within 3 days)
|
||||||
|
3. **Address feedback** if requested
|
||||||
|
4. **Approval** → Maintainer merges
|
||||||
|
|
||||||
|
## Code Review Tips
|
||||||
|
|
||||||
|
### What Reviewers Look For
|
||||||
|
|
||||||
|
✅ **Good:**
|
||||||
|
- Clear, self-explanatory code
|
||||||
|
- Appropriate comments for complex logic
|
||||||
|
- Tests covering edge cases
|
||||||
|
- Type hints on all functions
|
||||||
|
- Follows existing patterns
|
||||||
|
|
||||||
|
❌ **Avoid:**
|
||||||
|
- Large PRs (>500 lines) - split into smaller ones
|
||||||
|
- Mixing unrelated changes
|
||||||
|
- Missing tests for new features
|
||||||
|
- Breaking changes without migration path
|
||||||
|
- Copy-pasted code (refactor into shared functions)
|
||||||
|
|
||||||
|
### Responding to Feedback
|
||||||
|
|
||||||
|
- Don't take it personally - we're improving code together
|
||||||
|
- Ask questions if feedback unclear
|
||||||
|
- Push additional commits to address comments
|
||||||
|
- Mark conversations as resolved when fixed
|
||||||
|
|
||||||
|
## Finding Issues to Work On
|
||||||
|
|
||||||
|
Good first issues are labeled:
|
||||||
|
- `good first issue` - Beginner-friendly
|
||||||
|
- `help wanted` - Maintainers welcome contributions
|
||||||
|
- `documentation` - Docs improvements
|
||||||
|
|
||||||
|
Comment on the issue before starting work to avoid duplicate effort.
|
||||||
|
|
||||||
|
## Communication
|
||||||
|
|
||||||
|
- **GitHub Issues** - Bug reports, feature requests
|
||||||
|
- **Pull Requests** - Code discussion
|
||||||
|
- **Discussions** - General questions, ideas
|
||||||
|
|
||||||
|
Be respectful, constructive, and patient. We're all volunteers! 🙏
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
💡 **Related:**
|
||||||
|
- [Setup Guide](setup.md) - DevContainer setup
|
||||||
|
- [Coding Guidelines](coding-guidelines.md) - Style guide
|
||||||
|
- [Testing](testing.md) - Writing tests
|
||||||
|
- [Release Management](release-management.md) - How releases work
|
||||||
|
|
@ -0,0 +1,286 @@
|
||||||
|
---
|
||||||
|
comments: false
|
||||||
|
---
|
||||||
|
|
||||||
|
# Critical Behavior Patterns - Testing Guide
|
||||||
|
|
||||||
|
**Purpose:** This documentation lists essential behavior patterns that must be tested to ensure production-quality code and prevent resource leaks.
|
||||||
|
|
||||||
|
**Last Updated:** 2025-11-22
|
||||||
|
**Test Coverage:** 41 tests implemented (100% of critical patterns)
|
||||||
|
|
||||||
|
## 🎯 Why Are These Tests Critical?
|
||||||
|
|
||||||
|
Home Assistant integrations run **continuously** in the background. Resource leaks lead to:
|
||||||
|
- **Memory Leaks**: RAM usage grows over days/weeks until HA becomes unstable
|
||||||
|
- **Callback Leaks**: Listeners remain registered after entity removal → CPU load increases
|
||||||
|
- **Timer Leaks**: Timers continue running after unload → unnecessary background tasks
|
||||||
|
- **File Handle Leaks**: Storage files remain open → system resources exhausted
|
||||||
|
|
||||||
|
## ✅ Test Categories
|
||||||
|
|
||||||
|
### 1. Resource Cleanup (Memory Leak Prevention)
|
||||||
|
|
||||||
|
**File:** `tests/test_resource_cleanup.py`
|
||||||
|
|
||||||
|
#### 1.1 Listener Cleanup ✅
|
||||||
|
|
||||||
|
**What is tested:**
|
||||||
|
- Time-sensitive listeners are correctly removed (`async_add_time_sensitive_listener()`)
|
||||||
|
- Minute-update listeners are correctly removed (`async_add_minute_update_listener()`)
|
||||||
|
- Lifecycle callbacks are correctly unregistered (`register_lifecycle_callback()`)
|
||||||
|
- Sensor cleanup removes ALL registered listeners
|
||||||
|
- Binary sensor cleanup removes ALL registered listeners
|
||||||
|
|
||||||
|
**Why critical:**
|
||||||
|
- Each registered listener holds references to Entity + Coordinator
|
||||||
|
- Without cleanup: Entities are not freed by GC → Memory Leak
|
||||||
|
- With 80+ sensors × 3 listener types = 240+ callbacks that must be cleanly removed
|
||||||
|
|
||||||
|
**Code Locations:**
|
||||||
|
- `coordinator/listeners.py` → `async_add_time_sensitive_listener()`, `async_add_minute_update_listener()`
|
||||||
|
- `coordinator/core.py` → `register_lifecycle_callback()`
|
||||||
|
- `sensor/core.py` → `async_will_remove_from_hass()`
|
||||||
|
- `binary_sensor/core.py` → `async_will_remove_from_hass()`
|
||||||
|
|
||||||
|
#### 1.2 Timer Cleanup ✅
|
||||||
|
|
||||||
|
**What is tested:**
|
||||||
|
- Quarter-hour timer is cancelled and reference cleared
|
||||||
|
- Minute timer is cancelled and reference cleared
|
||||||
|
- Both timers are cancelled together
|
||||||
|
- Cleanup works even when timers are `None`
|
||||||
|
|
||||||
|
**Why critical:**
|
||||||
|
- Uncancelled timers continue running after integration unload
|
||||||
|
- HA's `async_track_utc_time_change()` creates persistent callbacks
|
||||||
|
- Without cleanup: Timers keep firing → CPU load + unnecessary coordinator updates
|
||||||
|
|
||||||
|
**Code Locations:**
|
||||||
|
- `coordinator/listeners.py` → `cancel_timers()`
|
||||||
|
- `coordinator/core.py` → `async_shutdown()`
|
||||||
|
|
||||||
|
#### 1.3 Config Entry Cleanup ✅
|
||||||
|
|
||||||
|
**What is tested:**
|
||||||
|
- Options update listener is registered via `async_on_unload()`
|
||||||
|
- Cleanup function is correctly passed to `async_on_unload()`
|
||||||
|
|
||||||
|
**Why critical:**
|
||||||
|
- `entry.add_update_listener()` registers permanent callback
|
||||||
|
- Without `async_on_unload()`: Listener remains active after reload → duplicate updates
|
||||||
|
- Pattern: `entry.async_on_unload(entry.add_update_listener(handler))`
|
||||||
|
|
||||||
|
**Code Locations:**
|
||||||
|
- `coordinator/core.py` → `__init__()` (listener registration)
|
||||||
|
- `__init__.py` → `async_unload_entry()`
|
||||||
|
|
||||||
|
### 2. Cache Invalidation ✅
|
||||||
|
|
||||||
|
**File:** `tests/test_resource_cleanup.py`
|
||||||
|
|
||||||
|
#### 2.1 Config Cache Invalidation
|
||||||
|
|
||||||
|
**What is tested:**
|
||||||
|
- DataTransformer config cache is invalidated on options change
|
||||||
|
- PeriodCalculator config + period cache is invalidated
|
||||||
|
- Trend calculator cache is cleared on coordinator update
|
||||||
|
|
||||||
|
**Why critical:**
|
||||||
|
- Stale config → Sensors use old user settings
|
||||||
|
- Stale period cache → Incorrect best/peak price periods
|
||||||
|
- Stale trend cache → Outdated trend analysis
|
||||||
|
|
||||||
|
**Code Locations:**
|
||||||
|
- `coordinator/data_transformation.py` → `invalidate_config_cache()`
|
||||||
|
- `coordinator/periods.py` → `invalidate_config_cache()`
|
||||||
|
- `sensor/calculators/trend.py` → `clear_trend_cache()`
|
||||||
|
|
||||||
|
### 3. Storage Cleanup ✅
|
||||||
|
|
||||||
|
**File:** `tests/test_resource_cleanup.py` + `tests/test_coordinator_shutdown.py`
|
||||||
|
|
||||||
|
#### 3.1 Persistent Storage Removal
|
||||||
|
|
||||||
|
**What is tested:**
|
||||||
|
- Storage file is deleted on config entry removal
|
||||||
|
- Cache is saved on shutdown (no data loss)
|
||||||
|
|
||||||
|
**Why critical:**
|
||||||
|
- Without storage removal: Old files remain after uninstallation
|
||||||
|
- Without cache save on shutdown: Data loss on HA restart
|
||||||
|
- Storage path: `.storage/tibber_prices.{entry_id}`
|
||||||
|
|
||||||
|
**Code Locations:**
|
||||||
|
- `__init__.py` → `async_remove_entry()`
|
||||||
|
- `coordinator/core.py` → `async_shutdown()`
|
||||||
|
|
||||||
|
### 4. Timer Scheduling ✅
|
||||||
|
|
||||||
|
**File:** `tests/test_timer_scheduling.py`
|
||||||
|
|
||||||
|
**What is tested:**
|
||||||
|
- Quarter-hour timer is registered with correct parameters
|
||||||
|
- Minute timer is registered with correct parameters
|
||||||
|
- Timers can be re-scheduled (override old timer)
|
||||||
|
- Midnight turnover detection works correctly
|
||||||
|
|
||||||
|
**Why critical:**
|
||||||
|
- Wrong timer parameters → Entities update at wrong times
|
||||||
|
- Without timer override on re-schedule → Multiple parallel timers → Performance problem
|
||||||
|
|
||||||
|
### 5. Sensor-to-Timer Assignment ✅
|
||||||
|
|
||||||
|
**File:** `tests/test_sensor_timer_assignment.py`
|
||||||
|
|
||||||
|
**What is tested:**
|
||||||
|
- All `TIME_SENSITIVE_ENTITY_KEYS` are valid entity keys
|
||||||
|
- All `MINUTE_UPDATE_ENTITY_KEYS` are valid entity keys
|
||||||
|
- Both lists are disjoint (no overlap)
|
||||||
|
- Sensor and binary sensor platforms are checked
|
||||||
|
|
||||||
|
**Why critical:**
|
||||||
|
- Wrong timer assignment → Sensors update at wrong times
|
||||||
|
- Overlap → Duplicate updates → Performance problem
|
||||||
|
|
||||||
|
## 🚨 Additional Analysis (Nice-to-Have Patterns)
|
||||||
|
|
||||||
|
These patterns were analyzed and classified as **not critical**:
|
||||||
|
|
||||||
|
### 6. Async Task Management
|
||||||
|
|
||||||
|
**Current Status:** Fire-and-forget pattern for short tasks
|
||||||
|
- `sensor/core.py` → Chart data refresh (short-lived, max 1-2 seconds)
|
||||||
|
- `coordinator/core.py` → Cache storage (short-lived, max 100ms)
|
||||||
|
|
||||||
|
**Why no tests needed:**
|
||||||
|
- No long-running tasks (all < 2 seconds)
|
||||||
|
- HA's event loop handles short tasks automatically
|
||||||
|
- Task exceptions are already logged
|
||||||
|
|
||||||
|
**If needed:** `_chart_refresh_task` tracking + cancel in `async_will_remove_from_hass()`
|
||||||
|
|
||||||
|
### 7. API Session Cleanup
|
||||||
|
|
||||||
|
**Current Status:** ✅ Correctly implemented
|
||||||
|
- `async_get_clientsession(hass)` is used (shared session)
|
||||||
|
- No new sessions are created
|
||||||
|
- HA manages session lifecycle automatically
|
||||||
|
|
||||||
|
**Code:** `api/client.py` + `__init__.py`
|
||||||
|
|
||||||
|
### 8. Translation Cache Memory
|
||||||
|
|
||||||
|
**Current Status:** ✅ Bounded cache
|
||||||
|
- Max ~5-10 languages × 5KB = 50KB total
|
||||||
|
- Module-level cache without re-loading
|
||||||
|
- Practically no memory issue
|
||||||
|
|
||||||
|
**Code:** `const.py` → `_TRANSLATIONS_CACHE`, `_STANDARD_TRANSLATIONS_CACHE`
|
||||||
|
|
||||||
|
### 9. Coordinator Data Structure Integrity
|
||||||
|
|
||||||
|
**Current Status:** Manually tested via `./scripts/develop`
|
||||||
|
- Midnight turnover works correctly (observed over several days)
|
||||||
|
- Missing keys are handled via `.get()` with defaults
|
||||||
|
- 80+ sensors access `coordinator.data` without errors
|
||||||
|
|
||||||
|
**Structure:**
|
||||||
|
```python
|
||||||
|
coordinator.data = {
|
||||||
|
"user_data": {...},
|
||||||
|
"priceInfo": [...], # Flat list of all enriched intervals
|
||||||
|
"currency": "EUR" # Top-level for easy access
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 10. Service Response Memory
|
||||||
|
|
||||||
|
**Current Status:** HA's response lifecycle
|
||||||
|
- HA automatically frees service responses after return
|
||||||
|
- ApexCharts ~20KB response is one-time per call
|
||||||
|
- No response accumulation in integration code
|
||||||
|
|
||||||
|
**Code:** `services/apexcharts.py`
|
||||||
|
|
||||||
|
## 📊 Test Coverage Status
|
||||||
|
|
||||||
|
### ✅ Implemented Tests (41 total)
|
||||||
|
|
||||||
|
| Category | Status | Tests | File | Coverage |
|
||||||
|
|----------|--------|-------|------|----------|
|
||||||
|
| Listener Cleanup | ✅ | 5 | `test_resource_cleanup.py` | 100% |
|
||||||
|
| Timer Cleanup | ✅ | 4 | `test_resource_cleanup.py` | 100% |
|
||||||
|
| Config Entry Cleanup | ✅ | 1 | `test_resource_cleanup.py` | 100% |
|
||||||
|
| Cache Invalidation | ✅ | 3 | `test_resource_cleanup.py` | 100% |
|
||||||
|
| Storage Cleanup | ✅ | 1 | `test_resource_cleanup.py` | 100% |
|
||||||
|
| Storage Persistence | ✅ | 2 | `test_coordinator_shutdown.py` | 100% |
|
||||||
|
| Timer Scheduling | ✅ | 8 | `test_timer_scheduling.py` | 100% |
|
||||||
|
| Sensor-Timer Assignment | ✅ | 17 | `test_sensor_timer_assignment.py` | 100% |
|
||||||
|
| **TOTAL** | **✅** | **41** | | **100% (critical)** |
|
||||||
|
|
||||||
|
### 📋 Analyzed but Not Implemented (Nice-to-Have)
|
||||||
|
|
||||||
|
| Category | Status | Rationale |
|
||||||
|
|----------|--------|-----------|
|
||||||
|
| Async Task Management | 📋 | Fire-and-forget pattern used (no long-running tasks) |
|
||||||
|
| API Session Cleanup | ✅ | Pattern correct (`async_get_clientsession` used) |
|
||||||
|
| Translation Cache | ✅ | Cache size bounded (~50KB max for 10 languages) |
|
||||||
|
| Data Structure Integrity | 📋 | Would add test time without finding real issues |
|
||||||
|
| Service Response Memory | 📋 | HA automatically frees service responses |
|
||||||
|
|
||||||
|
**Legend:**
|
||||||
|
- ✅ = Fully tested or pattern verified correct
|
||||||
|
- 📋 = Analyzed, low priority for testing (no known issues)
|
||||||
|
|
||||||
|
## 🎯 Development Status
|
||||||
|
|
||||||
|
### ✅ All Critical Patterns Tested
|
||||||
|
|
||||||
|
All essential memory leak prevention patterns are covered by 41 tests:
|
||||||
|
- ✅ Listeners are correctly removed (no callback leaks)
|
||||||
|
- ✅ Timers are cancelled (no background task leaks)
|
||||||
|
- ✅ Config entry cleanup works (no dangling listeners)
|
||||||
|
- ✅ Caches are invalidated (no stale data issues)
|
||||||
|
- ✅ Storage is saved and cleaned up (no data loss)
|
||||||
|
- ✅ Timer scheduling works correctly (no update issues)
|
||||||
|
- ✅ Sensor-timer assignment is correct (no wrong updates)
|
||||||
|
|
||||||
|
### 📋 Nice-to-Have Tests (Optional)
|
||||||
|
|
||||||
|
If problems arise in the future, these tests can be added:
|
||||||
|
|
||||||
|
1. **Async Task Management** - Pattern analyzed (fire-and-forget for short tasks)
|
||||||
|
2. **Data Structure Integrity** - Midnight rotation manually tested
|
||||||
|
3. **Service Response Memory** - HA's response lifecycle automatic
|
||||||
|
|
||||||
|
**Conclusion:** The integration has production-quality test coverage for all critical resource leak patterns.
|
||||||
|
|
||||||
|
## 🔍 How to Run Tests
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run all resource cleanup tests (14 tests)
|
||||||
|
./scripts/test tests/test_resource_cleanup.py -v
|
||||||
|
|
||||||
|
# Run all critical pattern tests (41 tests)
|
||||||
|
./scripts/test tests/test_resource_cleanup.py tests/test_coordinator_shutdown.py \
|
||||||
|
tests/test_timer_scheduling.py tests/test_sensor_timer_assignment.py -v
|
||||||
|
|
||||||
|
# Run all tests with coverage
|
||||||
|
./scripts/test --cov=custom_components.tibber_prices --cov-report=html
|
||||||
|
|
||||||
|
# Type checking and linting
|
||||||
|
./scripts/check
|
||||||
|
|
||||||
|
# Manual memory leak test
|
||||||
|
# 1. Start HA: ./scripts/develop
|
||||||
|
# 2. Monitor RAM: watch -n 1 'ps aux | grep home-assistant'
|
||||||
|
# 3. Reload integration multiple times (HA UI: Settings → Devices → Tibber Prices → Reload)
|
||||||
|
# 4. RAM should stabilize (not grow continuously)
|
||||||
|
```
|
||||||
|
|
||||||
|
## 📚 References
|
||||||
|
|
||||||
|
- **Home Assistant Cleanup Patterns**: https://developers.home-assistant.io/docs/integration_setup_failures/#cleanup
|
||||||
|
- **Async Best Practices**: https://developers.home-assistant.io/docs/asyncio_101/
|
||||||
|
- **Memory Profiling**: https://docs.python.org/3/library/tracemalloc.html
|
||||||
230
docs/developer/versioned_docs/version-v0.24.0/debugging.md
Normal file
230
docs/developer/versioned_docs/version-v0.24.0/debugging.md
Normal file
|
|
@ -0,0 +1,230 @@
|
||||||
|
# Debugging Guide
|
||||||
|
|
||||||
|
Tips and techniques for debugging the Tibber Prices integration during development.
|
||||||
|
|
||||||
|
## Logging
|
||||||
|
|
||||||
|
### Enable Debug Logging
|
||||||
|
|
||||||
|
Add to `configuration.yaml`:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
logger:
|
||||||
|
default: info
|
||||||
|
logs:
|
||||||
|
custom_components.tibber_prices: debug
|
||||||
|
```
|
||||||
|
|
||||||
|
Restart Home Assistant to apply.
|
||||||
|
|
||||||
|
### Key Log Messages
|
||||||
|
|
||||||
|
**Coordinator Updates:**
|
||||||
|
```
|
||||||
|
[custom_components.tibber_prices.coordinator] Successfully fetched price data
|
||||||
|
[custom_components.tibber_prices.coordinator] Cache valid, using cached data
|
||||||
|
[custom_components.tibber_prices.coordinator] Midnight turnover detected, clearing cache
|
||||||
|
```
|
||||||
|
|
||||||
|
**Period Calculation:**
|
||||||
|
```
|
||||||
|
[custom_components.tibber_prices.coordinator.periods] Calculating BEST PRICE periods: flex=15.0%
|
||||||
|
[custom_components.tibber_prices.coordinator.periods] Day 2024-12-06: Found 2 periods
|
||||||
|
[custom_components.tibber_prices.coordinator.periods] Period 1: 02:00-05:00 (12 intervals)
|
||||||
|
```
|
||||||
|
|
||||||
|
**API Errors:**
|
||||||
|
```
|
||||||
|
[custom_components.tibber_prices.api] API request failed: Unauthorized
|
||||||
|
[custom_components.tibber_prices.api] Retrying (attempt 2/3) after 2.0s
|
||||||
|
```
|
||||||
|
|
||||||
|
## VS Code Debugging
|
||||||
|
|
||||||
|
### Launch Configuration
|
||||||
|
|
||||||
|
`.vscode/launch.json`:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"version": "0.2.0",
|
||||||
|
"configurations": [
|
||||||
|
{
|
||||||
|
"name": "Home Assistant",
|
||||||
|
"type": "debugpy",
|
||||||
|
"request": "launch",
|
||||||
|
"module": "homeassistant",
|
||||||
|
"args": ["-c", "config", "--debug"],
|
||||||
|
"justMyCode": false,
|
||||||
|
"env": {
|
||||||
|
"PYTHONPATH": "${workspaceFolder}/.venv/lib/python3.13/site-packages"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Set Breakpoints
|
||||||
|
|
||||||
|
**Coordinator update:**
|
||||||
|
```python
|
||||||
|
# coordinator/core.py
|
||||||
|
async def _async_update_data(self) -> dict:
|
||||||
|
"""Fetch data from API."""
|
||||||
|
breakpoint() # Or set VS Code breakpoint
|
||||||
|
```
|
||||||
|
|
||||||
|
**Period calculation:**
|
||||||
|
```python
|
||||||
|
# coordinator/period_handlers/core.py
|
||||||
|
def calculate_periods(...) -> list[dict]:
|
||||||
|
"""Calculate best/peak price periods."""
|
||||||
|
breakpoint()
|
||||||
|
```
|
||||||
|
|
||||||
|
## pytest Debugging
|
||||||
|
|
||||||
|
### Run Single Test with Output
|
||||||
|
|
||||||
|
```bash
|
||||||
|
.venv/bin/python -m pytest tests/test_period_calculation.py::test_midnight_crossing -v -s
|
||||||
|
```
|
||||||
|
|
||||||
|
**Flags:**
|
||||||
|
- `-v` - Verbose output
|
||||||
|
- `-s` - Show print statements
|
||||||
|
- `-k pattern` - Run tests matching pattern
|
||||||
|
|
||||||
|
### Debug Test in VS Code
|
||||||
|
|
||||||
|
Set a breakpoint in the test file, then use the "Debug Test" CodeLens to run it under the debugger.
|
||||||
|
|
||||||
|
### Useful Test Patterns
|
||||||
|
|
||||||
|
**Print coordinator data:**
|
||||||
|
```python
|
||||||
|
def test_something(coordinator):
|
||||||
|
print(f"Coordinator data: {coordinator.data}")
|
||||||
|
print(f"Price info count: {len(coordinator.data['priceInfo'])}")
|
||||||
|
```
|
||||||
|
|
||||||
|
**Inspect period attributes:**
|
||||||
|
```python
|
||||||
|
def test_periods(hass, coordinator):
|
||||||
|
periods = coordinator.data.get('best_price_periods', [])
|
||||||
|
for period in periods:
|
||||||
|
print(f"Period: {period['start']} to {period['end']}")
|
||||||
|
print(f" Intervals: {len(period['intervals'])}")
|
||||||
|
```
|
||||||
|
|
||||||
|
## Common Issues
|
||||||
|
|
||||||
|
### Integration Not Loading
|
||||||
|
|
||||||
|
**Check:**
|
||||||
|
```bash
|
||||||
|
grep "tibber_prices" config/home-assistant.log
|
||||||
|
```
|
||||||
|
|
||||||
|
**Common causes:**
|
||||||
|
- Syntax error in Python code → Check logs for traceback
|
||||||
|
- Missing dependency → Run `uv sync`
|
||||||
|
- Wrong file permissions → `chmod +x scripts/*`
|
||||||
|
|
||||||
|
### Sensors Not Updating
|
||||||
|
|
||||||
|
**Check coordinator state:**
|
||||||
|
```python
|
||||||
|
# In Developer Tools > Template
|
||||||
|
{{ states.sensor.tibber_home_current_interval_price.last_updated }}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Debug in code:**
|
||||||
|
```python
|
||||||
|
# Add logging in sensor/core.py
|
||||||
|
_LOGGER.debug("Updating sensor %s: old=%s new=%s",
|
||||||
|
self.entity_id, self._attr_native_value, new_value)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Period Calculation Wrong
|
||||||
|
|
||||||
|
**Enable detailed period logs:**
|
||||||
|
```python
|
||||||
|
# coordinator/period_handlers/period_building.py
|
||||||
|
_LOGGER.debug("Candidate intervals: %s",
|
||||||
|
[(i['startsAt'], i['total']) for i in candidates])
|
||||||
|
```
|
||||||
|
|
||||||
|
**Check filter statistics:**
|
||||||
|
```
|
||||||
|
[period_building] Flex filter blocked: 45 intervals
|
||||||
|
[period_building] Min distance blocked: 12 intervals
|
||||||
|
[period_building] Level filter blocked: 8 intervals
|
||||||
|
```
|
||||||
|
|
||||||
|
## Performance Profiling
|
||||||
|
|
||||||
|
### Time Execution
|
||||||
|
|
||||||
|
```python
|
||||||
|
import time
|
||||||
|
|
||||||
|
start = time.perf_counter()
|
||||||
|
result = expensive_function()
|
||||||
|
duration = time.perf_counter() - start
|
||||||
|
_LOGGER.debug("Function took %.3fs", duration)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Memory Usage
|
||||||
|
|
||||||
|
```python
|
||||||
|
import tracemalloc
|
||||||
|
|
||||||
|
tracemalloc.start()
|
||||||
|
# ... your code ...
|
||||||
|
current, peak = tracemalloc.get_traced_memory()
|
||||||
|
_LOGGER.debug("Memory: current=%d peak=%d", current, peak)
|
||||||
|
tracemalloc.stop()
|
||||||
|
```
|
||||||
|
|
||||||
|
### Profile with cProfile
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python -m cProfile -o profile.stats -m homeassistant -c config
|
||||||
|
python -m pstats profile.stats
|
||||||
|
# Then: sort cumtime, stats 20
|
||||||
|
```
|
||||||
|
|
||||||
|
## Live Debugging in Running HA
|
||||||
|
|
||||||
|
### Remote Debugging with debugpy
|
||||||
|
|
||||||
|
Add to coordinator code:
|
||||||
|
```python
|
||||||
|
import debugpy
|
||||||
|
debugpy.listen(5678)
|
||||||
|
_LOGGER.info("Waiting for debugger attach on port 5678")
|
||||||
|
debugpy.wait_for_client()
|
||||||
|
```
|
||||||
|
|
||||||
|
Connect from VS Code with remote attach configuration.
|
||||||
|
|
||||||
|
### IPython REPL
|
||||||
|
|
||||||
|
Install in container:
|
||||||
|
```bash
|
||||||
|
uv pip install ipython
|
||||||
|
```
|
||||||
|
|
||||||
|
Add breakpoint:
|
||||||
|
```python
|
||||||
|
from IPython import embed
|
||||||
|
embed() # Drops into interactive shell
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
💡 **Related:**
|
||||||
|
- [Testing Guide](testing.md) - Writing and running tests
|
||||||
|
- [Setup Guide](setup.md) - Development environment
|
||||||
|
- [Architecture](architecture.md) - Code structure
|
||||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue