mirror of
https://github.com/jpawlowski/hass.tibber_prices.git
synced 2026-03-30 21:33:39 +00:00
Compare commits
No commits in common. "main" and "v0.18.0" have entirely different histories.
455 changed files with 5226 additions and 122381 deletions
|
|
@ -1,29 +1,18 @@
|
||||||
{
|
{
|
||||||
"name": "jpawlowski/hass.tibber_prices",
|
"name": "jpawlowski/hass.tibber_prices",
|
||||||
"image": "mcr.microsoft.com/devcontainers/python:3.14",
|
"image": "mcr.microsoft.com/devcontainers/python:3.13",
|
||||||
"postCreateCommand": "bash .devcontainer/setup-git.sh && scripts/setup/setup",
|
"postCreateCommand": "bash .devcontainer/setup-git.sh && scripts/setup/setup",
|
||||||
"postStartCommand": "scripts/motd",
|
"postStartCommand": "scripts/motd",
|
||||||
"containerEnv": {
|
"containerEnv": {
|
||||||
"PYTHONASYNCIODEBUG": "1",
|
"PYTHONASYNCIODEBUG": "1"
|
||||||
"TIBBER_PRICES_DEV": "1"
|
|
||||||
},
|
},
|
||||||
"forwardPorts": [
|
"forwardPorts": [
|
||||||
8123,
|
8123
|
||||||
3000,
|
|
||||||
3001
|
|
||||||
],
|
],
|
||||||
"portsAttributes": {
|
"portsAttributes": {
|
||||||
"8123": {
|
"8123": {
|
||||||
"label": "Home Assistant",
|
"label": "Home Assistant",
|
||||||
"onAutoForward": "notify"
|
"onAutoForward": "notify"
|
||||||
},
|
|
||||||
"3000": {
|
|
||||||
"label": "Docusaurus User Docs",
|
|
||||||
"onAutoForward": "notify"
|
|
||||||
},
|
|
||||||
"3001": {
|
|
||||||
"label": "Docusaurus Developer Docs",
|
|
||||||
"onAutoForward": "notify"
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"customizations": {
|
"customizations": {
|
||||||
|
|
@ -70,7 +59,7 @@
|
||||||
],
|
],
|
||||||
"python.defaultInterpreterPath": "${workspaceFolder}/.venv/bin/python",
|
"python.defaultInterpreterPath": "${workspaceFolder}/.venv/bin/python",
|
||||||
"python.analysis.extraPaths": [
|
"python.analysis.extraPaths": [
|
||||||
"${workspaceFolder}/.venv/lib/python3.14/site-packages"
|
"${workspaceFolder}/.venv/lib/python3.13/site-packages"
|
||||||
],
|
],
|
||||||
"python.terminal.activateEnvironment": true,
|
"python.terminal.activateEnvironment": true,
|
||||||
"python.terminal.activateEnvInCurrentTerminal": true,
|
"python.terminal.activateEnvInCurrentTerminal": true,
|
||||||
|
|
|
||||||
4
.github/FUNDING.yml
vendored
4
.github/FUNDING.yml
vendored
|
|
@ -1,4 +0,0 @@
|
||||||
# These are supported funding model platforms
|
|
||||||
|
|
||||||
github: [ jpawlowski ]
|
|
||||||
buy_me_a_coffee: jpawlowski
|
|
||||||
8
.github/workflows/auto-tag.yml
vendored
8
.github/workflows/auto-tag.yml
vendored
|
|
@ -20,7 +20,7 @@ jobs:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v6
|
uses: actions/checkout@v6.0.1
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0 # Need full history for git describe
|
fetch-depth: 0 # Need full history for git describe
|
||||||
|
|
||||||
|
|
@ -43,13 +43,13 @@ jobs:
|
||||||
echo "✗ Tag v${{ steps.manifest.outputs.version }} does not exist yet"
|
echo "✗ Tag v${{ steps.manifest.outputs.version }} does not exist yet"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
- name: Validate version format (stable or beta)
|
- name: Validate version format
|
||||||
if: steps.tag_check.outputs.exists == 'false'
|
if: steps.tag_check.outputs.exists == 'false'
|
||||||
run: |
|
run: |
|
||||||
VERSION="${{ steps.manifest.outputs.version }}"
|
VERSION="${{ steps.manifest.outputs.version }}"
|
||||||
if ! echo "$VERSION" | grep -qE '^[0-9]+\.[0-9]+\.[0-9]+(b[0-9]+)?$'; then
|
if ! echo "$VERSION" | grep -qE '^[0-9]+\.[0-9]+\.[0-9]+$'; then
|
||||||
echo "❌ Invalid version format: $VERSION"
|
echo "❌ Invalid version format: $VERSION"
|
||||||
echo "Expected format: X.Y.Z or X.Y.ZbN (e.g., 1.0.0, 0.25.0b0)"
|
echo "Expected format: X.Y.Z (e.g., 1.0.0)"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
echo "✓ Version format valid: $VERSION"
|
echo "✓ Version format valid: $VERSION"
|
||||||
|
|
|
||||||
163
.github/workflows/docusaurus.yml
vendored
163
.github/workflows/docusaurus.yml
vendored
|
|
@ -1,163 +0,0 @@
|
||||||
name: Deploy Docusaurus Documentation (Dual Sites)
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches: [main]
|
|
||||||
paths:
|
|
||||||
- 'docs/**'
|
|
||||||
- '.github/workflows/docusaurus.yml'
|
|
||||||
tags:
|
|
||||||
- 'v*.*.*'
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
# Concurrency control: cancel in-progress deployments
|
|
||||||
# Pattern from GitHub Actions best practices for deployment workflows
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: write
|
|
||||||
pages: write
|
|
||||||
id-token: write
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
deploy:
|
|
||||||
name: Build and Deploy Documentation Sites
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
environment:
|
|
||||||
name: github-pages
|
|
||||||
url: ${{ steps.deployment.outputs.page_url }}
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v6
|
|
||||||
with:
|
|
||||||
fetch-depth: 0 # Needed for version timestamps
|
|
||||||
|
|
||||||
- name: Detect prerelease tag (beta/rc)
|
|
||||||
id: taginfo
|
|
||||||
run: |
|
|
||||||
if [[ "${GITHUB_REF}" =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+(b[0-9]+|rc[0-9]+)$ ]]; then
|
|
||||||
echo "is_prerelease=true" >> "$GITHUB_OUTPUT"
|
|
||||||
echo "Detected prerelease tag: ${GITHUB_REF}"
|
|
||||||
else
|
|
||||||
echo "is_prerelease=false" >> "$GITHUB_OUTPUT"
|
|
||||||
echo "Stable tag or branch: ${GITHUB_REF}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
- uses: actions/setup-node@v6
|
|
||||||
with:
|
|
||||||
node-version: 24
|
|
||||||
cache: 'npm'
|
|
||||||
cache-dependency-path: |
|
|
||||||
docs/user/package-lock.json
|
|
||||||
docs/developer/package-lock.json
|
|
||||||
|
|
||||||
# USER DOCS BUILD
|
|
||||||
- name: Install user docs dependencies
|
|
||||||
working-directory: docs/user
|
|
||||||
run: npm ci
|
|
||||||
|
|
||||||
- name: Create user docs version snapshot on tag
|
|
||||||
if: startsWith(github.ref, 'refs/tags/v') && steps.taginfo.outputs.is_prerelease != 'true'
|
|
||||||
working-directory: docs/user
|
|
||||||
run: |
|
|
||||||
TAG_VERSION=${GITHUB_REF#refs/tags/}
|
|
||||||
echo "Creating user documentation version: $TAG_VERSION"
|
|
||||||
|
|
||||||
npm run docusaurus docs:version $TAG_VERSION || echo "Version already exists"
|
|
||||||
|
|
||||||
# Update GitHub links in versioned docs
|
|
||||||
if [ -d "versioned_docs/version-$TAG_VERSION" ]; then
|
|
||||||
find versioned_docs/version-$TAG_VERSION -name "*.md" -type f -exec sed -i "s|github.com/jpawlowski/hass.tibber_prices/blob/main/|github.com/jpawlowski/hass.tibber_prices/blob/$TAG_VERSION/|g" {} \; || true
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: Cleanup old user docs versions
|
|
||||||
if: startsWith(github.ref, 'refs/tags/v') && steps.taginfo.outputs.is_prerelease != 'true'
|
|
||||||
working-directory: docs/user
|
|
||||||
run: |
|
|
||||||
chmod +x ../cleanup-old-versions.sh
|
|
||||||
# Adapt script for single-instance mode (versioned_docs/ instead of user_versioned_docs/)
|
|
||||||
sed 's/user_versioned_docs/versioned_docs/g; s/user_versions.json/versions.json/g; s/developer_versioned_docs/versioned_docs/g; s/developer_versions.json/versions.json/g' ../cleanup-old-versions.sh > cleanup-single.sh
|
|
||||||
chmod +x cleanup-single.sh
|
|
||||||
./cleanup-single.sh
|
|
||||||
|
|
||||||
- name: Build user docs website
|
|
||||||
working-directory: docs/user
|
|
||||||
run: npm run build
|
|
||||||
|
|
||||||
# DEVELOPER DOCS BUILD
|
|
||||||
- name: Install developer docs dependencies
|
|
||||||
working-directory: docs/developer
|
|
||||||
run: npm ci
|
|
||||||
|
|
||||||
- name: Create developer docs version snapshot on tag
|
|
||||||
if: startsWith(github.ref, 'refs/tags/v') && steps.taginfo.outputs.is_prerelease != 'true'
|
|
||||||
working-directory: docs/developer
|
|
||||||
run: |
|
|
||||||
TAG_VERSION=${GITHUB_REF#refs/tags/}
|
|
||||||
echo "Creating developer documentation version: $TAG_VERSION"
|
|
||||||
|
|
||||||
npm run docusaurus docs:version $TAG_VERSION || echo "Version already exists"
|
|
||||||
|
|
||||||
# Update GitHub links in versioned docs
|
|
||||||
if [ -d "versioned_docs/version-$TAG_VERSION" ]; then
|
|
||||||
find versioned_docs/version-$TAG_VERSION -name "*.md" -type f -exec sed -i "s|github.com/jpawlowski/hass.tibber_prices/blob/main/|github.com/jpawlowski/hass.tibber_prices/blob/$TAG_VERSION/|g" {} \; || true
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: Cleanup old developer docs versions
|
|
||||||
if: startsWith(github.ref, 'refs/tags/v') && steps.taginfo.outputs.is_prerelease != 'true'
|
|
||||||
working-directory: docs/developer
|
|
||||||
run: |
|
|
||||||
chmod +x ../cleanup-old-versions.sh
|
|
||||||
# Adapt script for single-instance mode
|
|
||||||
sed 's/user_versioned_docs/versioned_docs/g; s/user_versions.json/versions.json/g; s/developer_versioned_docs/versioned_docs/g; s/developer_versions.json/versions.json/g' ../cleanup-old-versions.sh > cleanup-single.sh
|
|
||||||
chmod +x cleanup-single.sh
|
|
||||||
./cleanup-single.sh
|
|
||||||
|
|
||||||
- name: Build developer docs website
|
|
||||||
working-directory: docs/developer
|
|
||||||
run: npm run build
|
|
||||||
|
|
||||||
# MERGE BUILDS
|
|
||||||
- name: Merge both documentation sites
|
|
||||||
run: |
|
|
||||||
mkdir -p deploy-root/user
|
|
||||||
mkdir -p deploy-root/developer
|
|
||||||
cp docs/index.html deploy-root/
|
|
||||||
cp -r docs/user/build/* deploy-root/user/
|
|
||||||
cp -r docs/developer/build/* deploy-root/developer/
|
|
||||||
|
|
||||||
# COMMIT VERSION SNAPSHOTS
|
|
||||||
- name: Commit version snapshots back to repository
|
|
||||||
if: startsWith(github.ref, 'refs/tags/v') && steps.taginfo.outputs.is_prerelease != 'true'
|
|
||||||
run: |
|
|
||||||
TAG_VERSION=${GITHUB_REF#refs/tags/}
|
|
||||||
|
|
||||||
git config user.name "github-actions[bot]"
|
|
||||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
|
||||||
|
|
||||||
# Add version files from both docs
|
|
||||||
git add docs/user/versioned_docs/ docs/user/versions.json 2>/dev/null || true
|
|
||||||
git add docs/developer/versioned_docs/ docs/developer/versions.json 2>/dev/null || true
|
|
||||||
|
|
||||||
# Commit if there are changes
|
|
||||||
if git diff --staged --quiet; then
|
|
||||||
echo "No version snapshot changes to commit"
|
|
||||||
else
|
|
||||||
git commit -m "docs: add version snapshot $TAG_VERSION and cleanup old versions [skip ci]"
|
|
||||||
git push origin HEAD:main
|
|
||||||
echo "Version snapshots committed and pushed to main"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# DEPLOY TO GITHUB PAGES
|
|
||||||
- name: Setup Pages
|
|
||||||
uses: actions/configure-pages@v6
|
|
||||||
|
|
||||||
- name: Upload artifact
|
|
||||||
uses: actions/upload-pages-artifact@v4
|
|
||||||
with:
|
|
||||||
path: ./deploy-root
|
|
||||||
|
|
||||||
- name: Deploy to GitHub Pages
|
|
||||||
id: deployment
|
|
||||||
uses: actions/deploy-pages@v5
|
|
||||||
12
.github/workflows/lint.yml
vendored
12
.github/workflows/lint.yml
vendored
|
|
@ -4,15 +4,9 @@ on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- "main"
|
- "main"
|
||||||
paths-ignore:
|
|
||||||
- 'docs/**'
|
|
||||||
- '.github/workflows/docusaurus.yml'
|
|
||||||
pull_request:
|
pull_request:
|
||||||
branches:
|
branches:
|
||||||
- "main"
|
- "main"
|
||||||
paths-ignore:
|
|
||||||
- 'docs/**'
|
|
||||||
- '.github/workflows/docusaurus.yml'
|
|
||||||
|
|
||||||
permissions: {}
|
permissions: {}
|
||||||
|
|
||||||
|
|
@ -29,12 +23,12 @@ jobs:
|
||||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||||
|
|
||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
|
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
|
||||||
with:
|
with:
|
||||||
python-version: "3.14"
|
python-version: "3.13"
|
||||||
|
|
||||||
- name: Install uv
|
- name: Install uv
|
||||||
uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7.6.0
|
uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
|
||||||
with:
|
with:
|
||||||
version: "0.9.3"
|
version: "0.9.3"
|
||||||
|
|
||||||
|
|
|
||||||
61
.github/workflows/release.yml
vendored
61
.github/workflows/release.yml
vendored
|
|
@ -27,7 +27,7 @@ jobs:
|
||||||
version: ${{ steps.tag.outputs.version }}
|
version: ${{ steps.tag.outputs.version }}
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v6
|
uses: actions/checkout@v6.0.1
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
token: ${{ secrets.GITHUB_TOKEN }}
|
token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
@ -106,7 +106,7 @@ jobs:
|
||||||
needs: sync-manifest # Wait for manifest sync to complete
|
needs: sync-manifest # Wait for manifest sync to complete
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v6
|
uses: actions/checkout@v6.0.1
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0 # Fetch all history for git-cliff
|
fetch-depth: 0 # Fetch all history for git-cliff
|
||||||
ref: main # Use updated main branch if manifest was synced
|
ref: main # Use updated main branch if manifest was synced
|
||||||
|
|
@ -135,20 +135,10 @@ jobs:
|
||||||
FEAT=$(echo "$COMMITS" | grep -cE "^feat(\(.+\))?:" || true)
|
FEAT=$(echo "$COMMITS" | grep -cE "^feat(\(.+\))?:" || true)
|
||||||
FIX=$(echo "$COMMITS" | grep -cE "^fix(\(.+\))?:" || true)
|
FIX=$(echo "$COMMITS" | grep -cE "^fix(\(.+\))?:" || true)
|
||||||
|
|
||||||
parse_version() {
|
# Parse versions
|
||||||
local version="$1"
|
|
||||||
if [[ $version =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)(b[0-9]+)?$ ]]; then
|
|
||||||
echo "${BASH_REMATCH[1]} ${BASH_REMATCH[2]} ${BASH_REMATCH[3]} ${BASH_REMATCH[4]}"
|
|
||||||
else
|
|
||||||
echo "Invalid version format: $version" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Parse versions (support beta/prerelease suffix like 0.25.0b0)
|
|
||||||
PREV_VERSION="${PREV_TAG#v}"
|
PREV_VERSION="${PREV_TAG#v}"
|
||||||
read -r PREV_MAJOR PREV_MINOR PREV_PATCH PREV_PRERELEASE <<< "$(parse_version "$PREV_VERSION")"
|
IFS='.' read -r PREV_MAJOR PREV_MINOR PREV_PATCH <<< "$PREV_VERSION"
|
||||||
read -r MAJOR MINOR PATCH PRERELEASE <<< "$(parse_version "$TAG_VERSION")"
|
IFS='.' read -r MAJOR MINOR PATCH <<< "$TAG_VERSION"
|
||||||
|
|
||||||
WARNING=""
|
WARNING=""
|
||||||
SUGGESTION=""
|
SUGGESTION=""
|
||||||
|
|
@ -190,11 +180,9 @@ jobs:
|
||||||
echo "**Commits analyzed:** Breaking=$BREAKING, Features=$FEAT, Fixes=$FIX"
|
echo "**Commits analyzed:** Breaking=$BREAKING, Features=$FEAT, Fixes=$FIX"
|
||||||
echo ""
|
echo ""
|
||||||
echo "**To fix:**"
|
echo "**To fix:**"
|
||||||
echo "1. Run locally: \`./scripts/release/suggest-version\`"
|
echo "1. Delete the tag: \`git tag -d v$TAG_VERSION && git push origin :refs/tags/v$TAG_VERSION\`"
|
||||||
echo "2. Create correct tag: \`./scripts/release/prepare <suggested-version>\`"
|
echo "2. Run locally: \`./scripts/release/suggest-version\`"
|
||||||
echo "3. Push the corrected tag: \`git push origin v<suggested-version>\`"
|
echo "3. Create correct tag: \`./scripts/release/prepare X.Y.Z\`"
|
||||||
echo ""
|
|
||||||
echo "**This tag will be automatically deleted in the next step.**"
|
|
||||||
echo "EOF"
|
echo "EOF"
|
||||||
} >> $GITHUB_OUTPUT
|
} >> $GITHUB_OUTPUT
|
||||||
else
|
else
|
||||||
|
|
@ -202,19 +190,7 @@ jobs:
|
||||||
echo "warning=" >> $GITHUB_OUTPUT
|
echo "warning=" >> $GITHUB_OUTPUT
|
||||||
fi
|
fi
|
||||||
|
|
||||||
- name: Delete inappropriate version tag
|
|
||||||
if: steps.version_check.outputs.warning != ''
|
|
||||||
run: |
|
|
||||||
TAG_NAME="${GITHUB_REF#refs/tags/}"
|
|
||||||
echo "❌ Deleting tag $TAG_NAME (version not appropriate for changes)"
|
|
||||||
echo ""
|
|
||||||
echo "${{ steps.version_check.outputs.warning }}"
|
|
||||||
echo ""
|
|
||||||
git push origin --delete "$TAG_NAME"
|
|
||||||
exit 1
|
|
||||||
|
|
||||||
- name: Install git-cliff
|
- name: Install git-cliff
|
||||||
if: steps.version_check.outputs.warning == ''
|
|
||||||
run: |
|
run: |
|
||||||
wget https://github.com/orhun/git-cliff/releases/download/v2.4.0/git-cliff-2.4.0-x86_64-unknown-linux-gnu.tar.gz
|
wget https://github.com/orhun/git-cliff/releases/download/v2.4.0/git-cliff-2.4.0-x86_64-unknown-linux-gnu.tar.gz
|
||||||
tar -xzf git-cliff-2.4.0-x86_64-unknown-linux-gnu.tar.gz
|
tar -xzf git-cliff-2.4.0-x86_64-unknown-linux-gnu.tar.gz
|
||||||
|
|
@ -222,7 +198,6 @@ jobs:
|
||||||
git-cliff --version
|
git-cliff --version
|
||||||
|
|
||||||
- name: Generate release notes
|
- name: Generate release notes
|
||||||
if: steps.version_check.outputs.warning == ''
|
|
||||||
id: release_notes
|
id: release_notes
|
||||||
run: |
|
run: |
|
||||||
FROM_TAG="${{ steps.previoustag.outputs.previous_tag }}"
|
FROM_TAG="${{ steps.previoustag.outputs.previous_tag }}"
|
||||||
|
|
@ -241,6 +216,15 @@ jobs:
|
||||||
fi
|
fi
|
||||||
echo "title=$TITLE" >> $GITHUB_OUTPUT
|
echo "title=$TITLE" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
# Append version warning if present
|
||||||
|
WARNING="${{ steps.version_check.outputs.warning }}"
|
||||||
|
if [ -n "$WARNING" ]; then
|
||||||
|
echo "" >> release-notes.md
|
||||||
|
echo "---" >> release-notes.md
|
||||||
|
echo "" >> release-notes.md
|
||||||
|
echo "$WARNING" >> release-notes.md
|
||||||
|
fi
|
||||||
|
|
||||||
# Output for GitHub Actions
|
# Output for GitHub Actions
|
||||||
{
|
{
|
||||||
echo 'notes<<EOF'
|
echo 'notes<<EOF'
|
||||||
|
|
@ -248,20 +232,25 @@ jobs:
|
||||||
echo EOF
|
echo EOF
|
||||||
} >> $GITHUB_OUTPUT
|
} >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
- name: Version Check Summary
|
||||||
|
if: steps.version_check.outputs.warning != ''
|
||||||
|
run: |
|
||||||
|
echo "### ⚠️ Version Mismatch Detected" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "${{ steps.version_check.outputs.warning }}" >> $GITHUB_STEP_SUMMARY
|
||||||
|
|
||||||
- name: Create GitHub Release
|
- name: Create GitHub Release
|
||||||
if: steps.version_check.outputs.warning == ''
|
|
||||||
uses: softprops/action-gh-release@v2
|
uses: softprops/action-gh-release@v2
|
||||||
with:
|
with:
|
||||||
name: ${{ steps.release_notes.outputs.title }}
|
name: ${{ steps.release_notes.outputs.title }}
|
||||||
body: ${{ steps.release_notes.outputs.notes }}
|
body: ${{ steps.release_notes.outputs.notes }}
|
||||||
draft: false
|
draft: false
|
||||||
prerelease: ${{ contains(github.ref, 'b') }}
|
prerelease: false
|
||||||
generate_release_notes: false # We provide our own
|
generate_release_notes: false # We provide our own
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Summary
|
- name: Summary
|
||||||
if: steps.version_check.outputs.warning == ''
|
|
||||||
run: |
|
run: |
|
||||||
echo "✅ Release notes generated and published!" >> $GITHUB_STEP_SUMMARY
|
echo "✅ Release notes generated and published!" >> $GITHUB_STEP_SUMMARY
|
||||||
echo "" >> $GITHUB_STEP_SUMMARY
|
echo "" >> $GITHUB_STEP_SUMMARY
|
||||||
|
|
|
||||||
8
.github/workflows/validate.yml
vendored
8
.github/workflows/validate.yml
vendored
|
|
@ -7,15 +7,9 @@ on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
paths-ignore:
|
|
||||||
- 'docs/**'
|
|
||||||
- '.github/workflows/docusaurus.yml'
|
|
||||||
pull_request:
|
pull_request:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
paths-ignore:
|
|
||||||
- 'docs/**'
|
|
||||||
- '.github/workflows/docusaurus.yml'
|
|
||||||
|
|
||||||
permissions: {}
|
permissions: {}
|
||||||
|
|
||||||
|
|
@ -32,7 +26,7 @@ jobs:
|
||||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||||
|
|
||||||
- name: Run hassfest validation
|
- name: Run hassfest validation
|
||||||
uses: home-assistant/actions/hassfest@d56d093b9ab8d2105bc0cb6ee9bcc0ef4ec8b96d # master
|
uses: home-assistant/actions/hassfest@6778c32c6da322382854bd824e30fd4a4f3c20e5 # master
|
||||||
|
|
||||||
hacs: # https://github.com/hacs/action
|
hacs: # https://github.com/hacs/action
|
||||||
name: HACS validation
|
name: HACS validation
|
||||||
|
|
|
||||||
90
AGENTS.md
90
AGENTS.md
|
|
@ -17,18 +17,14 @@ _Note: When proposing significant updates to this file, update the metadata abov
|
||||||
|
|
||||||
When working with the codebase, Copilot MUST actively maintain consistency between this documentation and the actual code:
|
When working with the codebase, Copilot MUST actively maintain consistency between this documentation and the actual code:
|
||||||
|
|
||||||
**Scope:** "This documentation" and "this file" refer specifically to `AGENTS.md` in the repository root. This does NOT include user-facing documentation like `README.md`, Docusaurus sites, or comments in code. Those serve different purposes and are maintained separately.
|
**Scope:** "This documentation" and "this file" refer specifically to `AGENTS.md` in the repository root. This does NOT include user-facing documentation like `README.md`, `/docs/user/`, or comments in code. Those serve different purposes and are maintained separately.
|
||||||
|
|
||||||
**Documentation Organization:**
|
**Documentation Organization:**
|
||||||
|
|
||||||
- **This file** (`AGENTS.md`): AI/Developer long-term memory, patterns, conventions
|
- **This file** (`AGENTS.md`): AI/Developer long-term memory, patterns, conventions
|
||||||
- **`docs/user/`**: Docusaurus site for end-users (installation, configuration, usage examples)
|
- **`docs/user/`**: End-user guides (installation, configuration, usage examples)
|
||||||
- Markdown files in `docs/user/docs/*.md`
|
- **`docs/development/`**: Contributor guides (setup, architecture, release management)
|
||||||
- Navigation managed via `docs/user/sidebars.ts`
|
- **`README.md`**: Project overview with links to detailed documentation
|
||||||
- **`docs/developer/`**: Docusaurus site for contributors (architecture, development guides)
|
|
||||||
- Markdown files in `docs/developer/docs/*.md`
|
|
||||||
- Navigation managed via `docs/developer/sidebars.ts`
|
|
||||||
- **`README.md`**: Project overview with links to documentation sites
|
|
||||||
|
|
||||||
**Automatic Inconsistency Detection:**
|
**Automatic Inconsistency Detection:**
|
||||||
|
|
||||||
|
|
@ -422,7 +418,7 @@ After successful refactoring:
|
||||||
- **Architecture Benefits**: 42% line reduction in core.py (2,170 → 1,268 lines), clear separation of concerns, improved testability, reusable components
|
- **Architecture Benefits**: 42% line reduction in core.py (2,170 → 1,268 lines), clear separation of concerns, improved testability, reusable components
|
||||||
- **See "Common Tasks" section** for detailed patterns and examples
|
- **See "Common Tasks" section** for detailed patterns and examples
|
||||||
- **Quarter-hour precision**: Entities update on 00/15/30/45-minute boundaries via `schedule_quarter_hour_refresh()` in `coordinator/listeners.py`, not just on data fetch intervals. Uses `async_track_utc_time_change(minute=[0, 15, 30, 45], second=0)` for absolute-time scheduling. Smart boundary tolerance (±2 seconds) in `sensor/helpers.py` → `round_to_nearest_quarter_hour()` handles HA scheduling jitter: if HA triggers at 14:59:58 → rounds to 15:00:00 (next interval), if HA restarts at 14:59:30 → stays at 14:45:00 (current interval). This ensures current price sensors update without waiting for the next API poll, while preventing premature data display during normal operation.
|
- **Quarter-hour precision**: Entities update on 00/15/30/45-minute boundaries via `schedule_quarter_hour_refresh()` in `coordinator/listeners.py`, not just on data fetch intervals. Uses `async_track_utc_time_change(minute=[0, 15, 30, 45], second=0)` for absolute-time scheduling. Smart boundary tolerance (±2 seconds) in `sensor/helpers.py` → `round_to_nearest_quarter_hour()` handles HA scheduling jitter: if HA triggers at 14:59:58 → rounds to 15:00:00 (next interval), if HA restarts at 14:59:30 → stays at 14:45:00 (current interval). This ensures current price sensors update without waiting for the next API poll, while preventing premature data display during normal operation.
|
||||||
- **Currency handling**: Multi-currency support with base/sub units (e.g., EUR/ct, NOK/øre) via `get_currency_info()` and `format_price_unit_*()` in `const.py`.
|
- **Currency handling**: Multi-currency support with major/minor units (e.g., EUR/ct, NOK/øre) via `get_currency_info()` and `format_price_unit_*()` in `const.py`.
|
||||||
- **Intelligent caching strategy**: Minimizes API calls while ensuring data freshness:
|
- **Intelligent caching strategy**: Minimizes API calls while ensuring data freshness:
|
||||||
- User data cached for 24h (rarely changes)
|
- User data cached for 24h (rarely changes)
|
||||||
- Price data validated against calendar day - cleared on midnight turnover to force fresh fetch
|
- Price data validated against calendar day - cleared on midnight turnover to force fresh fetch
|
||||||
|
|
@ -741,9 +737,9 @@ When debugging period calculation issues:
|
||||||
4. Check relaxation warnings: INFO at 25%, WARNING at 30% indicate suboptimal config
|
4. Check relaxation warnings: INFO at 25%, WARNING at 30% indicate suboptimal config
|
||||||
|
|
||||||
**See:**
|
**See:**
|
||||||
- **Theory documentation**: `docs/developer/docs/period-calculation-theory.md` (comprehensive mathematical analysis, conflict conditions, configuration pitfalls)
|
- **Theory documentation**: `docs/development/period-calculation-theory.md` (comprehensive mathematical analysis, conflict conditions, configuration pitfalls)
|
||||||
- **Implementation**: `coordinator/period_handlers/` package (core.py, relaxation.py, level_filtering.py, period_building.py)
|
- **Implementation**: `coordinator/period_handlers/` package (core.py, relaxation.py, level_filtering.py, period_building.py)
|
||||||
- **User guide**: `docs/user/docs/period-calculation.md` (simplified user-facing explanations)
|
- **User guide**: `docs/user/period-calculation.md` (simplified user-facing explanations)
|
||||||
|
|
||||||
## Development Environment Setup
|
## Development Environment Setup
|
||||||
|
|
||||||
|
|
@ -1812,17 +1808,6 @@ When using `DataUpdateCoordinator`, entities get updates automatically. Only imp
|
||||||
**4. Service Response Declaration:**
|
**4. Service Response Declaration:**
|
||||||
Services returning data MUST declare `supports_response` parameter. Use `SupportsResponse.ONLY` for data-only services, `OPTIONAL` for dual-purpose, `NONE` for action-only. See `services.py` for examples.
|
Services returning data MUST declare `supports_response` parameter. Use `SupportsResponse.ONLY` for data-only services, `OPTIONAL` for dual-purpose, `NONE` for action-only. See `services.py` for examples.
|
||||||
|
|
||||||
**5. Entity Lifecycle & State Management:**
|
|
||||||
All entities MUST implement these patterns for proper HA integration:
|
|
||||||
|
|
||||||
- **`available` property**: Indicates if entity can be read/controlled. Return `False` when coordinator has no data yet or last update failed. See `entity.py` for base implementation. Special cases (e.g., `connection` binary_sensor) override to always return `True`.
|
|
||||||
|
|
||||||
- **State Restore**: Inherit from `RestoreSensor` (sensors) or `RestoreEntity` (binary_sensors) to restore state after HA restart. Eliminates "unavailable" gaps in history. Restore logic in `async_added_to_hass()` using `async_get_last_state()` and `async_get_last_sensor_data()`. See `sensor/core.py` and `binary_sensor/core.py` for implementation.
|
|
||||||
|
|
||||||
- **`force_update` property**: Set to `True` for entities where every state change should be recorded, even if value unchanged (e.g., `connection` sensor tracking connectivity issues). Default is `False`. See `binary_sensor/core.py` for example.
|
|
||||||
|
|
||||||
**Why this matters**: Without `available`, entities show stale data during errors. Without state restore, history has gaps after HA restart. Without `force_update`, repeated state changes aren't visible in history.
|
|
||||||
|
|
||||||
## Code Quality Rules
|
## Code Quality Rules
|
||||||
|
|
||||||
**CRITICAL: See "Linting Best Practices" section for comprehensive type checking (Pyright) and linting (Ruff) guidelines.**
|
**CRITICAL: See "Linting Best Practices" section for comprehensive type checking (Pyright) and linting (Ruff) guidelines.**
|
||||||
|
|
@ -1838,12 +1823,12 @@ This is a Home Assistant standard to avoid naming conflicts between integrations
|
||||||
# ✅ CORRECT - Integration prefix + semantic purpose
|
# ✅ CORRECT - Integration prefix + semantic purpose
|
||||||
class TibberPricesApiClient: # Integration + semantic role
|
class TibberPricesApiClient: # Integration + semantic role
|
||||||
class TibberPricesDataUpdateCoordinator: # Integration + semantic role
|
class TibberPricesDataUpdateCoordinator: # Integration + semantic role
|
||||||
class TibberPricesPriceDataManager: # Integration + semantic role
|
class TibberPricesDataFetcher: # Integration + semantic role
|
||||||
class TibberPricesSensor: # Integration + entity type
|
class TibberPricesSensor: # Integration + entity type
|
||||||
class TibberPricesEntity: # Integration + entity type
|
class TibberPricesEntity: # Integration + entity type
|
||||||
|
|
||||||
# ❌ INCORRECT - Missing integration prefix
|
# ❌ INCORRECT - Missing integration prefix
|
||||||
class PriceDataManager: # Should be: TibberPricesPriceDataManager
|
class DataFetcher: # Should be: TibberPricesDataFetcher
|
||||||
class TimeService: # Should be: TibberPricesTimeService
|
class TimeService: # Should be: TibberPricesTimeService
|
||||||
class PeriodCalculator: # Should be: TibberPricesPeriodCalculator
|
class PeriodCalculator: # Should be: TibberPricesPeriodCalculator
|
||||||
|
|
||||||
|
|
@ -1855,11 +1840,11 @@ class TibberPricesSensorCalculatorTrend: # Too verbose, import path shows loca
|
||||||
**IMPORTANT:** Do NOT include package hierarchy in class names. Python's import system provides the namespace:
|
**IMPORTANT:** Do NOT include package hierarchy in class names. Python's import system provides the namespace:
|
||||||
```python
|
```python
|
||||||
# The import path IS the full namespace:
|
# The import path IS the full namespace:
|
||||||
from custom_components.tibber_prices.coordinator.price_data_manager import TibberPricesPriceDataManager
|
from custom_components.tibber_prices.coordinator.data_fetching import TibberPricesDataFetcher
|
||||||
from custom_components.tibber_prices.sensor.calculators.trend import TibberPricesTrendCalculator
|
from custom_components.tibber_prices.sensor.calculators.trend import TibberPricesTrendCalculator
|
||||||
|
|
||||||
# Adding package names to class would be redundant:
|
# Adding package names to class would be redundant:
|
||||||
# TibberPricesCoordinatorPriceDataManager ❌ NO - unnecessarily verbose
|
# TibberPricesCoordinatorDataFetcher ❌ NO - unnecessarily verbose
|
||||||
# TibberPricesSensorCalculatorsTrendCalculator ❌ NO - ridiculously long
|
# TibberPricesSensorCalculatorsTrendCalculator ❌ NO - ridiculously long
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
@ -1905,14 +1890,14 @@ result = _InternalHelper().process()
|
||||||
|
|
||||||
**Example of genuine private class use case:**
|
**Example of genuine private class use case:**
|
||||||
```python
|
```python
|
||||||
# In coordinator/price_data_manager.py
|
# In coordinator/data_fetching.py
|
||||||
class _ApiRetryStateMachine:
|
class _ApiRetryStateMachine:
|
||||||
"""Internal state machine for retry logic. Never used outside this file."""
|
"""Internal state machine for retry logic. Never used outside this file."""
|
||||||
def __init__(self, max_retries: int) -> None:
|
def __init__(self, max_retries: int) -> None:
|
||||||
self._attempts = 0
|
self._attempts = 0
|
||||||
self._max_retries = max_retries
|
self._max_retries = max_retries
|
||||||
|
|
||||||
# Only used by PriceDataManager methods in this file
|
# Only used by DataFetcher methods in this file
|
||||||
```
|
```
|
||||||
|
|
||||||
In practice, most "helper" logic should be **functions**, not classes. Reserve classes for stateful components.
|
In practice, most "helper" logic should be **functions**, not classes. Reserve classes for stateful components.
|
||||||
|
|
@ -2082,7 +2067,7 @@ Public entry points → direct helpers (call order) → pure utilities. Prefix p
|
||||||
|
|
||||||
**Documentation language:**
|
**Documentation language:**
|
||||||
|
|
||||||
- **CRITICAL**: All user-facing documentation (`README.md`, `docs/user/docs/`, `docs/developer/docs/`) MUST be written in **English**
|
- **CRITICAL**: All user-facing documentation (`README.md`, `/docs/user/`, `/docs/development/`) MUST be written in **English**
|
||||||
- **Code comments**: Always use English for code comments and docstrings
|
- **Code comments**: Always use English for code comments and docstrings
|
||||||
- **UI translations**: Multi-language support exists in `/translations/` and `/custom_translations/` (de, en, nb, nl, sv) for UI strings only
|
- **UI translations**: Multi-language support exists in `/translations/` and `/custom_translations/` (de, en, nb, nl, sv) for UI strings only
|
||||||
- **Why English-only docs**: Ensures maintainability, accessibility to global community, and consistency with Home Assistant ecosystem
|
- **Why English-only docs**: Ensures maintainability, accessibility to global community, and consistency with Home Assistant ecosystem
|
||||||
|
|
@ -2120,7 +2105,7 @@ Public entry points → direct helpers (call order) → pure utilities. Prefix p
|
||||||
|
|
||||||
**User Documentation Quality:**
|
**User Documentation Quality:**
|
||||||
|
|
||||||
When writing or updating user-facing documentation (`docs/user/docs/` or `docs/developer/docs/`), follow these principles learned from real user feedback:
|
When writing or updating user-facing documentation (`docs/user/`), follow these principles learned from real user feedback:
|
||||||
|
|
||||||
- **Clarity over completeness**: Users want to understand concepts, not read technical specifications
|
- **Clarity over completeness**: Users want to understand concepts, not read technical specifications
|
||||||
- ✅ Good: "Relaxation automatically loosens filters until enough periods are found"
|
- ✅ Good: "Relaxation automatically loosens filters until enough periods are found"
|
||||||
|
|
@ -2408,8 +2393,7 @@ attributes = {
|
||||||
"rating_level": ..., # Price rating (LOW, NORMAL, HIGH)
|
"rating_level": ..., # Price rating (LOW, NORMAL, HIGH)
|
||||||
|
|
||||||
# 3. Price statistics (how much does it cost?)
|
# 3. Price statistics (how much does it cost?)
|
||||||
"price_mean": ...,
|
"price_avg": ...,
|
||||||
"price_median": ...,
|
|
||||||
"price_min": ...,
|
"price_min": ...,
|
||||||
"price_max": ...,
|
"price_max": ...,
|
||||||
|
|
||||||
|
|
@ -2609,8 +2593,7 @@ This ensures timestamp is always the first key in the attribute dict, regardless
|
||||||
"start": "2025-11-08T14:00:00+01:00",
|
"start": "2025-11-08T14:00:00+01:00",
|
||||||
"end": "2025-11-08T15:00:00+01:00",
|
"end": "2025-11-08T15:00:00+01:00",
|
||||||
"rating_level": "LOW",
|
"rating_level": "LOW",
|
||||||
"price_mean": 18.5,
|
"price_avg": 18.5,
|
||||||
"price_median": 18.3,
|
|
||||||
"interval_count": 4,
|
"interval_count": 4,
|
||||||
"intervals": [...]
|
"intervals": [...]
|
||||||
}
|
}
|
||||||
|
|
@ -2621,7 +2604,7 @@ This ensures timestamp is always the first key in the attribute dict, regardless
|
||||||
"interval_count": 4,
|
"interval_count": 4,
|
||||||
"rating_level": "LOW",
|
"rating_level": "LOW",
|
||||||
"start": "2025-11-08T14:00:00+01:00",
|
"start": "2025-11-08T14:00:00+01:00",
|
||||||
"price_mean": 18.5,
|
"price_avg": 18.5,
|
||||||
"end": "2025-11-08T15:00:00+01:00"
|
"end": "2025-11-08T15:00:00+01:00"
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
@ -2666,8 +2649,8 @@ This ensures timestamp is always the first key in the attribute dict, regardless
|
||||||
|
|
||||||
**Price-Related Attributes:**
|
**Price-Related Attributes:**
|
||||||
|
|
||||||
- Period statistics: `price_mean` (arithmetic mean), `price_median` (median value)
|
- Period averages: `period_price_avg` (average across the period)
|
||||||
- Reference comparisons: `period_price_diff_from_daily_min` (period mean vs daily min)
|
- Reference comparisons: `period_price_diff_from_daily_min` (period avg vs daily min)
|
||||||
- Interval-specific: `interval_price_diff_from_daily_max` (current interval vs daily max)
|
- Interval-specific: `interval_price_diff_from_daily_max` (current interval vs daily max)
|
||||||
|
|
||||||
### Before Adding New Attributes
|
### Before Adding New Attributes
|
||||||
|
|
@ -2741,12 +2724,12 @@ The refactoring consolidated duplicate logic into unified methods in `sensor/cor
|
||||||
|
|
||||||
- Replaces: `_get_statistics_value()` (calendar day portion)
|
- Replaces: `_get_statistics_value()` (calendar day portion)
|
||||||
- Handles: Min/max/avg for calendar days (today/tomorrow)
|
- Handles: Min/max/avg for calendar days (today/tomorrow)
|
||||||
- Returns: Price in subunit currency units (cents/øre)
|
- Returns: Price in minor currency units (cents/øre)
|
||||||
|
|
||||||
- **`_get_24h_window_value(stat_func)`**
|
- **`_get_24h_window_value(stat_func)`**
|
||||||
- Replaces: `_get_average_value()`, `_get_minmax_value()`
|
- Replaces: `_get_average_value()`, `_get_minmax_value()`
|
||||||
- Handles: Trailing/leading 24h window statistics
|
- Handles: Trailing/leading 24h window statistics
|
||||||
- Returns: Price in subunit currency units (cents/øre)
|
- Returns: Price in minor currency units (cents/øre)
|
||||||
|
|
||||||
Legacy wrapper methods still exist for backward compatibility but will be removed in a future cleanup phase.
|
Legacy wrapper methods still exist for backward compatibility but will be removed in a future cleanup phase.
|
||||||
|
|
||||||
|
|
@ -2863,32 +2846,3 @@ Only after consulting the official HA docs did we discover the correct pattern:
|
||||||
- Translations: `sensor/definitions.py` (translation_key usage)
|
- Translations: `sensor/definitions.py` (translation_key usage)
|
||||||
- Test fixtures: `tests/conftest.py`
|
- Test fixtures: `tests/conftest.py`
|
||||||
- Time handling: Any file importing `dt_util`
|
- Time handling: Any file importing `dt_util`
|
||||||
|
|
||||||
## Recorder History Optimization
|
|
||||||
|
|
||||||
**CRITICAL: Always exclude non-essential attributes from Recorder to prevent database bloat.**
|
|
||||||
|
|
||||||
**Implementation:**
|
|
||||||
- Use `_unrecorded_attributes = frozenset({...})` as **class attribute** in entity classes
|
|
||||||
- See `sensor/core.py` and `binary_sensor/core.py` for current implementation
|
|
||||||
|
|
||||||
**What to exclude:**
|
|
||||||
1. **Descriptions/help text** - `description`, `usage_tips` (static, large)
|
|
||||||
2. **Large nested structures** - `periods`, `data`, `*_attributes` dicts (>1KB)
|
|
||||||
3. **Frequently changing diagnostics** - `icon_color`, `cache_age`, status strings
|
|
||||||
4. **Static/rarely changing config** - `currency`, `resolution`, `*_id` mappings
|
|
||||||
5. **Temporary/time-bound data** - `next_api_poll`, `last_*` timestamps
|
|
||||||
6. **Redundant/derived data** - `price_spread`, `diff_%` (calculable from other attrs)
|
|
||||||
|
|
||||||
**What to keep:**
|
|
||||||
- `timestamp` (always), all price values, `cache_age_minutes`, `updates_today`
|
|
||||||
- Period timing (`start`, `end`, `duration_minutes`), price statistics
|
|
||||||
- Boolean status flags, `relaxation_active`
|
|
||||||
|
|
||||||
**When adding new attributes:**
|
|
||||||
- Will this be useful in history 1 week from now? No → Exclude
|
|
||||||
- Can this be calculated from other attributes? Yes → Exclude
|
|
||||||
- Is this >100 bytes and not essential? Yes → Exclude
|
|
||||||
|
|
||||||
**See:** `docs/developer/docs/recorder-optimization.md` for detailed categories and impact analysis
|
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -122,23 +122,13 @@ Always run before committing:
|
||||||
- Enrich price data before exposing to entities
|
- Enrich price data before exposing to entities
|
||||||
- Follow Home Assistant entity naming conventions
|
- Follow Home Assistant entity naming conventions
|
||||||
|
|
||||||
See [Coding Guidelines](docs/developer/docs/coding-guidelines.md) for complete details.
|
See [Coding Guidelines](docs/development/coding-guidelines.md) for complete details.
|
||||||
|
|
||||||
## Documentation
|
## Documentation
|
||||||
|
|
||||||
Documentation is organized in two Docusaurus sites:
|
- **User guides**: Place in `docs/user/` (installation, configuration, usage)
|
||||||
|
- **Developer guides**: Place in `docs/development/` (architecture, patterns)
|
||||||
- **User docs** (`docs/user/`): Installation, configuration, usage guides
|
- **Update translations**: When changing `translations/en.json`, update ALL language files
|
||||||
- Markdown files in `docs/user/docs/*.md`
|
|
||||||
- Navigation via `docs/user/sidebars.ts`
|
|
||||||
- **Developer docs** (`docs/developer/`): Architecture, patterns, contribution guides
|
|
||||||
- Markdown files in `docs/developer/docs/*.md`
|
|
||||||
- Navigation via `docs/developer/sidebars.ts`
|
|
||||||
|
|
||||||
**When adding new documentation:**
|
|
||||||
1. Place file in appropriate `docs/*/docs/` directory
|
|
||||||
2. Add to corresponding `sidebars.ts` for navigation
|
|
||||||
3. Update translations when changing `translations/en.json` (update ALL language files)
|
|
||||||
|
|
||||||
## Reporting Bugs
|
## Reporting Bugs
|
||||||
|
|
||||||
|
|
|
||||||
92
README.md
92
README.md
|
|
@ -1,8 +1,4 @@
|
||||||
# Tibber Prices - Custom Home Assistant Integration
|
# Tibber Price Information & Ratings
|
||||||
|
|
||||||
<p align="center">
|
|
||||||
<img src="images/header.svg" alt="Tibber Prices Custom Integration for Tibber" width="600">
|
|
||||||
</p>
|
|
||||||
|
|
||||||
[![GitHub Release][releases-shield]][releases]
|
[![GitHub Release][releases-shield]][releases]
|
||||||
[![GitHub Activity][commits-shield]][commits]
|
[![GitHub Activity][commits-shield]][commits]
|
||||||
|
|
@ -10,39 +6,28 @@
|
||||||
|
|
||||||
[![hacs][hacsbadge]][hacs]
|
[![hacs][hacsbadge]][hacs]
|
||||||
[![Project Maintenance][maintenance-shield]][user_profile]
|
[![Project Maintenance][maintenance-shield]][user_profile]
|
||||||
|
[![BuyMeCoffee][buymecoffeebadge]][buymecoffee]
|
||||||
|
|
||||||
<a href="https://www.buymeacoffee.com/jpawlowski" target="_blank"><img src="images/bmc-button.svg" alt="Buy Me A Coffee" height="41" width="174"></a>
|
A Home Assistant integration that provides advanced price information and ratings from Tibber. This integration fetches **quarter-hourly** electricity prices, enriches them with statistical analysis, and provides smart indicators to help you optimize your energy consumption and save money.
|
||||||
|
|
||||||
> **⚠️ Not affiliated with Tibber**
|

|
||||||
> This is an independent, community-maintained custom integration for Home Assistant. It is **not** an official Tibber product and is **not** affiliated with or endorsed by Tibber AS.
|
|
||||||
|
|
||||||
A custom Home Assistant integration that provides advanced electricity price information and ratings from Tibber. This integration fetches **quarter-hourly** electricity prices, enriches them with statistical analysis, and provides smart indicators to help you optimize your energy consumption and save money.
|
|
||||||
|
|
||||||
## 📖 Documentation
|
## 📖 Documentation
|
||||||
|
|
||||||
**[📚 Complete Documentation](https://jpawlowski.github.io/hass.tibber_prices/)** - Two comprehensive documentation sites:
|
- **[User Guide](docs/user/)** - Installation, configuration, and usage guides
|
||||||
|
- **[Period Calculation](docs/user/period-calculation.md)** - How Best/Peak Price periods are calculated
|
||||||
- **[👤 User Documentation](https://jpawlowski.github.io/hass.tibber_prices/user/)** - Installation, configuration, usage guides, and examples
|
- **[Developer Guide](docs/development/)** - Contributing, architecture, and release process
|
||||||
- **[🔧 Developer Documentation](https://jpawlowski.github.io/hass.tibber_prices/developer/)** - Architecture, contributing guidelines, and development setup
|
- **[Changelog](https://github.com/jpawlowski/hass.tibber_prices/releases)** - Release history and notes
|
||||||
|
|
||||||
**Quick Links:**
|
|
||||||
- [Installation Guide](https://jpawlowski.github.io/hass.tibber_prices/user/installation) - Step-by-step setup instructions
|
|
||||||
- [Sensor Reference](https://jpawlowski.github.io/hass.tibber_prices/user/sensors) - Complete list of all sensors and attributes
|
|
||||||
- [Chart Examples](https://jpawlowski.github.io/hass.tibber_prices/user/chart-examples) - ApexCharts visualizations
|
|
||||||
- [Automation Examples](https://jpawlowski.github.io/hass.tibber_prices/user/automation-examples) - Real-world automation scenarios
|
|
||||||
- [Changelog](https://github.com/jpawlowski/hass.tibber_prices/releases) - Release history and notes
|
|
||||||
|
|
||||||
## ✨ Features
|
## ✨ Features
|
||||||
|
|
||||||
- **Quarter-Hourly Price Data**: Access detailed 15-minute interval pricing (384 data points across 4 days: day before yesterday/yesterday/today/tomorrow)
|
- **Quarter-Hourly Price Data**: Access detailed 15-minute interval pricing (384 data points across 4 days: day before yesterday/yesterday/today/tomorrow)
|
||||||
- **Flexible Currency Display**: Choose between base currency (€, kr) or subunit (ct, øre) display - configurable per your preference with smart defaults
|
- **Current and Next Interval Prices**: Get real-time price data in both major currency (€, kr) and minor units (ct, øre)
|
||||||
- **Multi-Currency Support**: Automatic detection and formatting for EUR, NOK, SEK, DKK, USD, and GBP
|
- **Multi-Currency Support**: Automatic detection and formatting for EUR, NOK, SEK, DKK, USD, and GBP
|
||||||
- **Price Level Indicators**: Know when you're in a VERY_CHEAP, CHEAP, NORMAL, EXPENSIVE, or VERY_EXPENSIVE period
|
- **Price Level Indicators**: Know when you're in a VERY_CHEAP, CHEAP, NORMAL, EXPENSIVE, or VERY_EXPENSIVE period
|
||||||
- **Statistical Sensors**: Track lowest, highest, and average prices for the day
|
- **Statistical Sensors**: Track lowest, highest, and average prices for the day
|
||||||
- **Price Ratings**: Quarter-hourly ratings comparing current prices to 24-hour trailing averages
|
- **Price Ratings**: Quarter-hourly ratings comparing current prices to 24-hour trailing averages
|
||||||
- **Smart Indicators**: Binary sensors to detect peak hours and best price hours for automations
|
- **Smart Indicators**: Binary sensors to detect peak hours and best price hours for automations
|
||||||
- **Beautiful ApexCharts**: Auto-generated chart configurations with dynamic Y-axis scaling ([see examples](https://jpawlowski.github.io/hass.tibber_prices/user/chart-examples))
|
|
||||||
- **Chart Metadata Sensor**: Dynamic chart configuration for optimal visualization
|
|
||||||
- **Intelligent Caching**: Minimizes API calls while ensuring data freshness across Home Assistant restarts
|
- **Intelligent Caching**: Minimizes API calls while ensuring data freshness across Home Assistant restarts
|
||||||
- **Custom Actions** (backend services): API endpoints for advanced integrations (ApexCharts support included)
|
- **Custom Actions** (backend services): API endpoints for advanced integrations (ApexCharts support included)
|
||||||
- **Diagnostic Sensors**: Monitor data freshness and availability
|
- **Diagnostic Sensors**: Monitor data freshness and availability
|
||||||
|
|
@ -94,7 +79,7 @@ This will guide you through:
|
||||||
- Configure additional sensors in **Settings** → **Devices & Services** → **Tibber Price Information & Ratings** → **Entities**
|
- Configure additional sensors in **Settings** → **Devices & Services** → **Tibber Price Information & Ratings** → **Entities**
|
||||||
- Use sensors in automations, dashboards, and scripts
|
- Use sensors in automations, dashboards, and scripts
|
||||||
|
|
||||||
📖 **[Full Installation Guide →](https://jpawlowski.github.io/hass.tibber_prices/user/installation)**
|
📖 **[Full Installation Guide →](docs/user/installation.md)**
|
||||||
|
|
||||||
## 📊 Available Entities
|
## 📊 Available Entities
|
||||||
|
|
||||||
|
|
@ -102,8 +87,6 @@ The integration provides **30+ sensors** across different categories. Key sensor
|
||||||
|
|
||||||
> **Rich Sensor Attributes**: All sensors include extensive attributes with timestamps, context data, and detailed explanations. Enable **Extended Descriptions** in the integration options to add `long_description` and `usage_tips` attributes to every sensor, providing in-context documentation directly in Home Assistant's UI.
|
> **Rich Sensor Attributes**: All sensors include extensive attributes with timestamps, context data, and detailed explanations. Enable **Extended Descriptions** in the integration options to add `long_description` and `usage_tips` attributes to every sensor, providing in-context documentation directly in Home Assistant's UI.
|
||||||
|
|
||||||
**[📋 Complete Sensor Reference](https://jpawlowski.github.io/hass.tibber_prices/user/sensors)** - Full list with descriptions and attributes
|
|
||||||
|
|
||||||
### Core Price Sensors (Enabled by Default)
|
### Core Price Sensors (Enabled by Default)
|
||||||
|
|
||||||
| Entity | Description |
|
| Entity | Description |
|
||||||
|
|
@ -146,8 +129,8 @@ The integration provides **30+ sensors** across different categories. Key sensor
|
||||||
|
|
||||||
| Entity | Description |
|
| Entity | Description |
|
||||||
| ------------------------- | ----------------------------------------------------------------------------------------- |
|
| ------------------------- | ----------------------------------------------------------------------------------------- |
|
||||||
| Peak Price Period | ON when in a detected peak price period ([how it works](https://jpawlowski.github.io/hass.tibber_prices/user/period-calculation)) |
|
| Peak Price Period | ON when in a detected peak price period ([how it works](docs/user/period-calculation.md)) |
|
||||||
| Best Price Period | ON when in a detected best price period ([how it works](https://jpawlowski.github.io/hass.tibber_prices/user/period-calculation)) |
|
| Best Price Period | ON when in a detected best price period ([how it works](docs/user/period-calculation.md)) |
|
||||||
| Tibber API Connection | Connection status to Tibber API |
|
| Tibber API Connection | Connection status to Tibber API |
|
||||||
| Tomorrow's Data Available | Whether tomorrow's price data is available |
|
| Tomorrow's Data Available | Whether tomorrow's price data is available |
|
||||||
|
|
||||||
|
|
@ -165,15 +148,13 @@ The following sensors are available but disabled by default. Enable them in `Set
|
||||||
- **Previous Interval Price** & **Previous Interval Price Level**: Historical data for the last 15-minute interval
|
- **Previous Interval Price** & **Previous Interval Price Level**: Historical data for the last 15-minute interval
|
||||||
- **Previous Interval Price Rating**: Rating for the previous interval
|
- **Previous Interval Price Rating**: Rating for the previous interval
|
||||||
- **Trailing 24h Average Price**: Average of the past 24 hours from now
|
- **Trailing 24h Average Price**: Average of the past 24 hours from now
|
||||||
- **Trailing 24h Minimum/Maximum Price**: Min/max in the past 24 hours
|
- **Trailing 24h Minimum/Maximum Price**: Min/max in the past 24 hours
|
||||||
|
|
||||||
> **Note**: Currency display is configurable during setup. Choose between:
|
> **Note**: All monetary sensors use minor currency units (ct/kWh, øre/kWh, ¢/kWh, p/kWh) automatically based on your Tibber account's currency. Supported: EUR, NOK, SEK, DKK, USD, GBP.
|
||||||
> - **Base currency** (€/kWh, kr/kWh) - decimal values, differences visible from 3rd-4th decimal
|
|
||||||
> - **Subunit** (ct/kWh, øre/kWh) - larger values, differences visible from 1st decimal
|
|
||||||
>
|
|
||||||
> Smart defaults: EUR → subunit (German/Dutch preference), NOK/SEK/DKK → base (Scandinavian preference). Supported currencies: EUR, NOK, SEK, DKK, USD, GBP.
|
|
||||||
|
|
||||||
## Automation Examples> **Note:** See the [full automation examples guide](https://jpawlowski.github.io/hass.tibber_prices/user/automation-examples) for more advanced recipes.
|
## Automation Examples
|
||||||
|
|
||||||
|
> **Note:** See the [full automation examples guide](docs/user/automation-examples.md) for more advanced recipes.
|
||||||
|
|
||||||
### Run Appliances During Cheap Hours
|
### Run Appliances During Cheap Hours
|
||||||
|
|
||||||
|
|
@ -196,7 +177,7 @@ automation:
|
||||||
entity_id: switch.dishwasher
|
entity_id: switch.dishwasher
|
||||||
```
|
```
|
||||||
|
|
||||||
> **Learn more:** The [period calculation guide](https://jpawlowski.github.io/hass.tibber_prices/user/period-calculation) explains how Best/Peak Price periods are identified and how you can configure filters (flexibility, minimum distance from average, price level filters with gap tolerance).
|
> **Learn more:** The [period calculation guide](docs/user/period-calculation.md) explains how Best/Peak Price periods are identified and how you can configure filters (flexibility, minimum distance from average, price level filters with gap tolerance).
|
||||||
|
|
||||||
### Notify on Extremely High Prices
|
### Notify on Extremely High Prices
|
||||||
|
|
||||||
|
|
@ -284,9 +265,8 @@ automation:
|
||||||
### Currency or units showing incorrectly
|
### Currency or units showing incorrectly
|
||||||
|
|
||||||
- Currency is automatically detected from your Tibber account
|
- Currency is automatically detected from your Tibber account
|
||||||
- Display mode (base currency vs. subunit) can be configured in integration options: `Settings > Devices & Services > Tibber Price Information & Ratings > Configure`
|
- The integration supports EUR, NOK, SEK, DKK, USD, and GBP with appropriate minor units
|
||||||
- Supported currencies: EUR, NOK, SEK, DKK, USD, and GBP
|
- Enable/disable major vs. minor unit sensors in `Settings > Devices & Services > Tibber Price Information & Ratings > Entities`
|
||||||
- Smart defaults apply: EUR users get subunit (ct), Scandinavian users get base currency (kr)
|
|
||||||
|
|
||||||
## Advanced Features
|
## Advanced Features
|
||||||
|
|
||||||
|
|
@ -326,17 +306,7 @@ template:
|
||||||
Price at {{ timestamp }}: {{ price }} ct/kWh
|
Price at {{ timestamp }}: {{ price }} ct/kWh
|
||||||
```
|
```
|
||||||
|
|
||||||
📖 **[View all sensors and attributes →](https://jpawlowski.github.io/hass.tibber_prices/user/sensors)**
|
📖 **[View all sensors and attributes →](docs/user/sensors.md)**
|
||||||
|
|
||||||
### Dynamic Icons & Visual Indicators
|
|
||||||
|
|
||||||
All sensors feature dynamic icons that change based on price levels, providing instant visual feedback in your dashboards.
|
|
||||||
|
|
||||||
<img src="docs/user/static/img/entities-overview.jpg" width="400" alt="Entity list showing dynamic icons for different price states">
|
|
||||||
|
|
||||||
_Dynamic icons adapt to price levels, trends, and period states - showing CHEAP prices, FALLING trend, and active Best Price Period_
|
|
||||||
|
|
||||||
📖 **[Dynamic Icons Guide →](https://jpawlowski.github.io/hass.tibber_prices/user/dynamic-icons)** | **[Icon Colors Guide →](https://jpawlowski.github.io/hass.tibber_prices/user/icon-colors)**
|
|
||||||
|
|
||||||
### Custom Actions
|
### Custom Actions
|
||||||
|
|
||||||
|
|
@ -346,27 +316,23 @@ The integration provides custom actions (they still appear as services under the
|
||||||
- `tibber_prices.get_apexcharts_yaml` - Generate complete ApexCharts configurations
|
- `tibber_prices.get_apexcharts_yaml` - Generate complete ApexCharts configurations
|
||||||
- `tibber_prices.refresh_user_data` - Manually refresh account information
|
- `tibber_prices.refresh_user_data` - Manually refresh account information
|
||||||
|
|
||||||
📖 **[Action documentation and examples →](https://jpawlowski.github.io/hass.tibber_prices/user/actions)**
|
📖 **[Action documentation and examples →](docs/user/actions.md)**
|
||||||
|
|
||||||
### Chart Visualizations (Optional)
|
### ApexCharts Integration
|
||||||
|
|
||||||
The integration includes built-in support for creating price visualization cards with automatic Y-axis scaling and color-coded series.
|
The integration includes built-in support for creating beautiful price visualization cards. Use the `get_apexcharts_yaml` action to generate card configurations automatically.
|
||||||
|
|
||||||
<img src="docs/user/static/img/charts/rolling-window.jpg" width="600" alt="Example: Dynamic 48h rolling window chart">
|
📖 **[ApexCharts examples →](docs/user/automation-examples.md#apexcharts-cards)**
|
||||||
|
|
||||||
_Optional: Dynamic 48h chart with automatic Y-axis scaling - generated via `get_apexcharts_yaml` action_
|
|
||||||
|
|
||||||
📖 **[Chart examples and setup guide →](https://jpawlowski.github.io/hass.tibber_prices/user/chart-examples)**
|
|
||||||
|
|
||||||
## 🤝 Contributing
|
## 🤝 Contributing
|
||||||
|
|
||||||
Contributions are welcome! Please read the [Contributing Guidelines](CONTRIBUTING.md) and [Developer Documentation](https://jpawlowski.github.io/hass.tibber_prices/developer/) before submitting pull requests.
|
Contributions are welcome! Please read the [Contributing Guidelines](CONTRIBUTING.md) and [Developer Guide](docs/development/) before submitting pull requests.
|
||||||
|
|
||||||
### For Contributors
|
### For Contributors
|
||||||
|
|
||||||
- **[Developer Setup](https://jpawlowski.github.io/hass.tibber_prices/developer/setup)** - Get started with DevContainer
|
- **[Developer Setup](docs/development/setup.md)** - Get started with DevContainer
|
||||||
- **[Architecture Guide](https://jpawlowski.github.io/hass.tibber_prices/developer/architecture)** - Understand the codebase
|
- **[Architecture Guide](docs/development/architecture.md)** - Understand the codebase
|
||||||
- **[Release Management](https://jpawlowski.github.io/hass.tibber_prices/developer/release-management)** - Release process and versioning
|
- **[Release Management](docs/development/release-management.md)** - Release process and versioning
|
||||||
|
|
||||||
## 🤖 Development Note
|
## 🤖 Development Note
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -49,8 +49,6 @@ logger:
|
||||||
custom_components.tibber_prices.coordinator.period_handlers.period_overlap.details: info
|
custom_components.tibber_prices.coordinator.period_handlers.period_overlap.details: info
|
||||||
# Outlier flex capping
|
# Outlier flex capping
|
||||||
custom_components.tibber_prices.coordinator.period_handlers.core.details: info
|
custom_components.tibber_prices.coordinator.period_handlers.core.details: info
|
||||||
# Level filtering details (min_distance scaling)
|
|
||||||
custom_components.tibber_prices.coordinator.period_handlers.level_filtering.details: info
|
|
||||||
|
|
||||||
# Interval pool details (cache operations, GC):
|
# Interval pool details (cache operations, GC):
|
||||||
# Cache lookup/miss, gap detection, fetch group additions
|
# Cache lookup/miss, gap detection, fetch group additions
|
||||||
|
|
|
||||||
|
|
@ -20,10 +20,7 @@ from homeassistant.loader import async_get_loaded_integration
|
||||||
|
|
||||||
from .api import TibberPricesApiClient
|
from .api import TibberPricesApiClient
|
||||||
from .const import (
|
from .const import (
|
||||||
CONF_CURRENCY_DISPLAY_MODE,
|
|
||||||
DATA_CHART_CONFIG,
|
DATA_CHART_CONFIG,
|
||||||
DATA_CHART_METADATA_CONFIG,
|
|
||||||
DISPLAY_MODE_SUBUNIT,
|
|
||||||
DOMAIN,
|
DOMAIN,
|
||||||
LOGGER,
|
LOGGER,
|
||||||
async_load_standard_translations,
|
async_load_standard_translations,
|
||||||
|
|
@ -47,8 +44,6 @@ if TYPE_CHECKING:
|
||||||
PLATFORMS: list[Platform] = [
|
PLATFORMS: list[Platform] = [
|
||||||
Platform.SENSOR,
|
Platform.SENSOR,
|
||||||
Platform.BINARY_SENSOR,
|
Platform.BINARY_SENSOR,
|
||||||
Platform.NUMBER,
|
|
||||||
Platform.SWITCH,
|
|
||||||
]
|
]
|
||||||
|
|
||||||
# Configuration schema for configuration.yaml
|
# Configuration schema for configuration.yaml
|
||||||
|
|
@ -61,7 +56,7 @@ CONFIG_SCHEMA = vol.Schema(
|
||||||
vol.Optional("day"): vol.All(vol.Any(str, list), vol.Coerce(list)),
|
vol.Optional("day"): vol.All(vol.Any(str, list), vol.Coerce(list)),
|
||||||
vol.Optional("resolution"): str,
|
vol.Optional("resolution"): str,
|
||||||
vol.Optional("output_format"): str,
|
vol.Optional("output_format"): str,
|
||||||
vol.Optional("subunit_currency"): bool,
|
vol.Optional("minor_currency"): bool,
|
||||||
vol.Optional("round_decimals"): vol.All(int, vol.Range(min=0, max=10)),
|
vol.Optional("round_decimals"): vol.All(int, vol.Range(min=0, max=10)),
|
||||||
vol.Optional("include_level"): bool,
|
vol.Optional("include_level"): bool,
|
||||||
vol.Optional("include_rating_level"): bool,
|
vol.Optional("include_rating_level"): bool,
|
||||||
|
|
@ -105,48 +100,9 @@ async def async_setup(hass: HomeAssistant, config: dict[str, Any]) -> bool:
|
||||||
LOGGER.debug("No chart_export configuration found in configuration.yaml")
|
LOGGER.debug("No chart_export configuration found in configuration.yaml")
|
||||||
hass.data[DOMAIN][DATA_CHART_CONFIG] = {}
|
hass.data[DOMAIN][DATA_CHART_CONFIG] = {}
|
||||||
|
|
||||||
# Extract chart_metadata config if present
|
|
||||||
chart_metadata_config = domain_config.get("chart_metadata", {})
|
|
||||||
|
|
||||||
if chart_metadata_config:
|
|
||||||
LOGGER.debug("Loaded chart_metadata configuration from configuration.yaml")
|
|
||||||
hass.data[DOMAIN][DATA_CHART_METADATA_CONFIG] = chart_metadata_config
|
|
||||||
else:
|
|
||||||
LOGGER.debug("No chart_metadata configuration found in configuration.yaml")
|
|
||||||
hass.data[DOMAIN][DATA_CHART_METADATA_CONFIG] = {}
|
|
||||||
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
async def _migrate_config_options(hass: HomeAssistant, entry: ConfigEntry) -> None:
|
|
||||||
"""
|
|
||||||
Migrate config options for backward compatibility.
|
|
||||||
|
|
||||||
This ensures existing configs get sensible defaults when new options are added.
|
|
||||||
Runs automatically on integration startup.
|
|
||||||
"""
|
|
||||||
migration_performed = False
|
|
||||||
migrated = dict(entry.options)
|
|
||||||
|
|
||||||
# Migration: Set currency_display_mode to subunit for legacy configs
|
|
||||||
# New configs (created after v1.1.0) get currency-appropriate defaults via get_default_options().
|
|
||||||
# This migration preserves legacy behavior where all prices were in subunit currency (cents/øre).
|
|
||||||
# Only runs for old config entries that don't have this option explicitly set.
|
|
||||||
if CONF_CURRENCY_DISPLAY_MODE not in migrated:
|
|
||||||
migrated[CONF_CURRENCY_DISPLAY_MODE] = DISPLAY_MODE_SUBUNIT
|
|
||||||
migration_performed = True
|
|
||||||
LOGGER.info(
|
|
||||||
"[%s] Migrated legacy config: Set currency_display_mode=%s (preserves pre-v1.1.0 behavior)",
|
|
||||||
entry.title,
|
|
||||||
DISPLAY_MODE_SUBUNIT,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Save migrated options if any changes were made
|
|
||||||
if migration_performed:
|
|
||||||
hass.config_entries.async_update_entry(entry, options=migrated)
|
|
||||||
LOGGER.debug("[%s] Config migration completed", entry.title)
|
|
||||||
|
|
||||||
|
|
||||||
def _get_access_token(hass: HomeAssistant, entry: ConfigEntry) -> str:
|
def _get_access_token(hass: HomeAssistant, entry: ConfigEntry) -> str:
|
||||||
"""
|
"""
|
||||||
Get access token from entry or parent entry.
|
Get access token from entry or parent entry.
|
||||||
|
|
@ -191,10 +147,6 @@ async def async_setup_entry(
|
||||||
) -> bool:
|
) -> bool:
|
||||||
"""Set up this integration using UI."""
|
"""Set up this integration using UI."""
|
||||||
LOGGER.debug(f"[tibber_prices] async_setup_entry called for entry_id={entry.entry_id}")
|
LOGGER.debug(f"[tibber_prices] async_setup_entry called for entry_id={entry.entry_id}")
|
||||||
|
|
||||||
# Migrate config options if needed (e.g., set default currency display mode for existing configs)
|
|
||||||
await _migrate_config_options(hass, entry)
|
|
||||||
|
|
||||||
# Preload translations to populate the cache
|
# Preload translations to populate the cache
|
||||||
await async_load_translations(hass, "en")
|
await async_load_translations(hass, "en")
|
||||||
await async_load_standard_translations(hass, "en")
|
await async_load_standard_translations(hass, "en")
|
||||||
|
|
@ -279,8 +231,7 @@ async def async_setup_entry(
|
||||||
# https://developers.home-assistant.io/docs/integration_fetching_data#coordinated-single-api-poll-for-data-for-all-entities
|
# https://developers.home-assistant.io/docs/integration_fetching_data#coordinated-single-api-poll-for-data-for-all-entities
|
||||||
if entry.state == ConfigEntryState.SETUP_IN_PROGRESS:
|
if entry.state == ConfigEntryState.SETUP_IN_PROGRESS:
|
||||||
await coordinator.async_config_entry_first_refresh()
|
await coordinator.async_config_entry_first_refresh()
|
||||||
# Note: Options update listener is registered in coordinator.__init__
|
entry.async_on_unload(entry.add_update_listener(async_reload_entry))
|
||||||
# (handles cache invalidation + refresh without full reload)
|
|
||||||
else:
|
else:
|
||||||
await coordinator.async_refresh()
|
await coordinator.async_refresh()
|
||||||
|
|
||||||
|
|
@ -300,9 +251,6 @@ async def async_unload_entry(
|
||||||
await async_save_pool_state(hass, entry.entry_id, pool_state)
|
await async_save_pool_state(hass, entry.entry_id, pool_state)
|
||||||
LOGGER.debug("[%s] Interval pool state saved on unload", entry.title)
|
LOGGER.debug("[%s] Interval pool state saved on unload", entry.title)
|
||||||
|
|
||||||
# Shutdown interval pool (cancels background tasks)
|
|
||||||
await entry.runtime_data.interval_pool.async_shutdown()
|
|
||||||
|
|
||||||
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
|
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
|
||||||
|
|
||||||
if unload_ok and entry.runtime_data is not None:
|
if unload_ok and entry.runtime_data is not None:
|
||||||
|
|
|
||||||
|
|
@ -886,24 +886,7 @@ class TibberPricesApiClient:
|
||||||
headers: dict | None = None,
|
headers: dict | None = None,
|
||||||
query_type: TibberPricesQueryType = TibberPricesQueryType.USER,
|
query_type: TibberPricesQueryType = TibberPricesQueryType.USER,
|
||||||
) -> Any:
|
) -> Any:
|
||||||
"""
|
"""Get information from the API with rate limiting and retry logic."""
|
||||||
Get information from the API with rate limiting and retry logic.
|
|
||||||
|
|
||||||
Exception Handling Strategy:
|
|
||||||
- AuthenticationError: Immediate raise, triggers reauth flow
|
|
||||||
- PermissionError: Immediate raise, non-retryable
|
|
||||||
- CommunicationError: Retry with exponential backoff
|
|
||||||
- ApiClientError (Rate Limit): Retry with Retry-After delay
|
|
||||||
- ApiClientError (Other): Retry only if explicitly retryable
|
|
||||||
- Network errors (aiohttp.ClientError, socket.gaierror, TimeoutError):
|
|
||||||
Converted to CommunicationError and retried
|
|
||||||
|
|
||||||
Retry Logic:
|
|
||||||
- Max retries: 5 (configurable via _max_retries)
|
|
||||||
- Base delay: 2 seconds (exponential backoff: 2s, 4s, 8s, 16s, 32s)
|
|
||||||
- Rate limit delay: Uses Retry-After header or falls back to exponential
|
|
||||||
- Caps: 30s for network errors, 120s for rate limits, 300s for Retry-After
|
|
||||||
"""
|
|
||||||
headers = headers or prepare_headers(self._access_token, self._version)
|
headers = headers or prepare_headers(self._access_token, self._version)
|
||||||
last_error: Exception | None = None
|
last_error: Exception | None = None
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -25,79 +25,31 @@ HTTP_BAD_REQUEST = 400
|
||||||
HTTP_UNAUTHORIZED = 401
|
HTTP_UNAUTHORIZED = 401
|
||||||
HTTP_FORBIDDEN = 403
|
HTTP_FORBIDDEN = 403
|
||||||
HTTP_TOO_MANY_REQUESTS = 429
|
HTTP_TOO_MANY_REQUESTS = 429
|
||||||
HTTP_INTERNAL_SERVER_ERROR = 500
|
|
||||||
HTTP_BAD_GATEWAY = 502
|
|
||||||
HTTP_SERVICE_UNAVAILABLE = 503
|
|
||||||
HTTP_GATEWAY_TIMEOUT = 504
|
|
||||||
|
|
||||||
|
|
||||||
def verify_response_or_raise(response: aiohttp.ClientResponse) -> None:
|
def verify_response_or_raise(response: aiohttp.ClientResponse) -> None:
|
||||||
"""
|
"""Verify that the response is valid."""
|
||||||
Verify HTTP response and map to appropriate exceptions.
|
|
||||||
|
|
||||||
Error Mapping:
|
|
||||||
- 401 Unauthorized → AuthenticationError (non-retryable)
|
|
||||||
- 403 Forbidden → PermissionError (non-retryable)
|
|
||||||
- 429 Rate Limit → ApiClientError with retry support
|
|
||||||
- 400 Bad Request → ApiClientError (non-retryable, invalid query)
|
|
||||||
- 5xx Server Errors → CommunicationError (retryable)
|
|
||||||
- Other errors → Let aiohttp.raise_for_status() handle
|
|
||||||
"""
|
|
||||||
# Authentication failures - non-retryable
|
|
||||||
if response.status == HTTP_UNAUTHORIZED:
|
if response.status == HTTP_UNAUTHORIZED:
|
||||||
_LOGGER.error("Tibber API authentication failed - check access token")
|
_LOGGER.error("Tibber API authentication failed - check access token")
|
||||||
raise TibberPricesApiClientAuthenticationError(TibberPricesApiClientAuthenticationError.INVALID_CREDENTIALS)
|
raise TibberPricesApiClientAuthenticationError(TibberPricesApiClientAuthenticationError.INVALID_CREDENTIALS)
|
||||||
|
|
||||||
# Permission denied - non-retryable
|
|
||||||
if response.status == HTTP_FORBIDDEN:
|
if response.status == HTTP_FORBIDDEN:
|
||||||
_LOGGER.error("Tibber API access forbidden - insufficient permissions")
|
_LOGGER.error("Tibber API access forbidden - insufficient permissions")
|
||||||
raise TibberPricesApiClientPermissionError(TibberPricesApiClientPermissionError.INSUFFICIENT_PERMISSIONS)
|
raise TibberPricesApiClientPermissionError(TibberPricesApiClientPermissionError.INSUFFICIENT_PERMISSIONS)
|
||||||
|
|
||||||
# Rate limiting - retryable with explicit delay
|
|
||||||
if response.status == HTTP_TOO_MANY_REQUESTS:
|
if response.status == HTTP_TOO_MANY_REQUESTS:
|
||||||
# Check for Retry-After header that Tibber might send
|
# Check for Retry-After header that Tibber might send
|
||||||
retry_after = response.headers.get("Retry-After", "unknown")
|
retry_after = response.headers.get("Retry-After", "unknown")
|
||||||
_LOGGER.warning("Tibber API rate limit exceeded - retry after %s seconds", retry_after)
|
_LOGGER.warning("Tibber API rate limit exceeded - retry after %s seconds", retry_after)
|
||||||
raise TibberPricesApiClientError(TibberPricesApiClientError.RATE_LIMIT_ERROR.format(retry_after=retry_after))
|
raise TibberPricesApiClientError(TibberPricesApiClientError.RATE_LIMIT_ERROR.format(retry_after=retry_after))
|
||||||
|
|
||||||
# Bad request - non-retryable (invalid query)
|
|
||||||
if response.status == HTTP_BAD_REQUEST:
|
if response.status == HTTP_BAD_REQUEST:
|
||||||
_LOGGER.error("Tibber API rejected request - likely invalid GraphQL query")
|
_LOGGER.error("Tibber API rejected request - likely invalid GraphQL query")
|
||||||
raise TibberPricesApiClientError(
|
raise TibberPricesApiClientError(
|
||||||
TibberPricesApiClientError.INVALID_QUERY_ERROR.format(message="Bad request - likely invalid GraphQL query")
|
TibberPricesApiClientError.INVALID_QUERY_ERROR.format(message="Bad request - likely invalid GraphQL query")
|
||||||
)
|
)
|
||||||
|
|
||||||
# Server errors 5xx - retryable (temporary server issues)
|
|
||||||
if response.status in (
|
|
||||||
HTTP_INTERNAL_SERVER_ERROR,
|
|
||||||
HTTP_BAD_GATEWAY,
|
|
||||||
HTTP_SERVICE_UNAVAILABLE,
|
|
||||||
HTTP_GATEWAY_TIMEOUT,
|
|
||||||
):
|
|
||||||
_LOGGER.warning(
|
|
||||||
"Tibber API server error %d - temporary issue, will retry",
|
|
||||||
response.status,
|
|
||||||
)
|
|
||||||
# Let this be caught as aiohttp.ClientResponseError in _api_wrapper
|
|
||||||
# where it's converted to CommunicationError with retry logic
|
|
||||||
response.raise_for_status()
|
|
||||||
|
|
||||||
# All other HTTP errors - let aiohttp handle
|
|
||||||
response.raise_for_status()
|
response.raise_for_status()
|
||||||
|
|
||||||
|
|
||||||
async def verify_graphql_response(response_json: dict, query_type: TibberPricesQueryType) -> None:
|
async def verify_graphql_response(response_json: dict, query_type: TibberPricesQueryType) -> None:
|
||||||
"""
|
"""Verify the GraphQL response for errors and data completeness, including empty data."""
|
||||||
Verify GraphQL response and map error codes to appropriate exceptions.
|
|
||||||
|
|
||||||
GraphQL Error Code Mapping:
|
|
||||||
- UNAUTHENTICATED → AuthenticationError (triggers reauth flow)
|
|
||||||
- FORBIDDEN → PermissionError (non-retryable)
|
|
||||||
- RATE_LIMITED/TOO_MANY_REQUESTS → ApiClientError (retryable)
|
|
||||||
- VALIDATION_ERROR/GRAPHQL_VALIDATION_FAILED → ApiClientError (non-retryable)
|
|
||||||
- Other codes → Generic ApiClientError (with code in message)
|
|
||||||
- Empty data → ApiClientError (non-retryable, API has no data)
|
|
||||||
"""
|
|
||||||
if "errors" in response_json:
|
if "errors" in response_json:
|
||||||
errors = response_json["errors"]
|
errors = response_json["errors"]
|
||||||
if not errors:
|
if not errors:
|
||||||
|
|
@ -340,8 +292,7 @@ def flatten_price_info(subscription: dict) -> list[dict]:
|
||||||
A flat list containing all price dictionaries (startsAt, total, level).
|
A flat list containing all price dictionaries (startsAt, total, level).
|
||||||
|
|
||||||
"""
|
"""
|
||||||
# Use 'or {}' to handle None values (API may return None during maintenance)
|
price_info_range = subscription.get("priceInfoRange", {})
|
||||||
price_info_range = subscription.get("priceInfoRange") or {}
|
|
||||||
|
|
||||||
# Transform priceInfoRange edges data (extract historical quarter-hourly prices)
|
# Transform priceInfoRange edges data (extract historical quarter-hourly prices)
|
||||||
# This contains 192 intervals (2 days) starting from day before yesterday midnight
|
# This contains 192 intervals (2 days) starting from day before yesterday midnight
|
||||||
|
|
@ -356,6 +307,8 @@ def flatten_price_info(subscription: dict) -> list[dict]:
|
||||||
historical_prices.append(edge["node"])
|
historical_prices.append(edge["node"])
|
||||||
|
|
||||||
# Return all intervals as a single flattened array
|
# Return all intervals as a single flattened array
|
||||||
# Use 'or {}' to handle None values (API may return None during maintenance)
|
return (
|
||||||
price_info = subscription.get("priceInfo") or {}
|
historical_prices
|
||||||
return historical_prices + (price_info.get("today") or []) + (price_info.get("tomorrow") or [])
|
+ subscription.get("priceInfo", {}).get("today", [])
|
||||||
|
+ subscription.get("priceInfo", {}).get("tomorrow", [])
|
||||||
|
)
|
||||||
|
|
|
||||||
|
|
@ -4,15 +4,9 @@ from __future__ import annotations
|
||||||
|
|
||||||
from typing import TYPE_CHECKING
|
from typing import TYPE_CHECKING
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import get_display_unit_factor
|
|
||||||
from custom_components.tibber_prices.coordinator.helpers import get_intervals_for_day_offsets
|
from custom_components.tibber_prices.coordinator.helpers import get_intervals_for_day_offsets
|
||||||
from custom_components.tibber_prices.entity_utils import add_icon_color_attribute
|
from custom_components.tibber_prices.entity_utils import add_icon_color_attribute
|
||||||
|
|
||||||
# Constants for price display conversion
|
|
||||||
_SUBUNIT_FACTOR = 100 # Conversion factor for subunit currency (ct/øre)
|
|
||||||
_SUBUNIT_PRECISION = 2 # Decimal places for subunit currency
|
|
||||||
_BASE_PRECISION = 4 # Decimal places for base currency
|
|
||||||
|
|
||||||
# Import TypedDict definitions for documentation (not used in signatures)
|
# Import TypedDict definitions for documentation (not used in signatures)
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
|
|
@ -72,7 +66,6 @@ def get_price_intervals_attributes(
|
||||||
*,
|
*,
|
||||||
time: TibberPricesTimeService,
|
time: TibberPricesTimeService,
|
||||||
reverse_sort: bool,
|
reverse_sort: bool,
|
||||||
config_entry: TibberPricesConfigEntry,
|
|
||||||
) -> dict | None:
|
) -> dict | None:
|
||||||
"""
|
"""
|
||||||
Build attributes for period-based sensors (best/peak price).
|
Build attributes for period-based sensors (best/peak price).
|
||||||
|
|
@ -83,13 +76,11 @@ def get_price_intervals_attributes(
|
||||||
1. Get period summaries from coordinator (already filtered and fully calculated)
|
1. Get period summaries from coordinator (already filtered and fully calculated)
|
||||||
2. Add the current timestamp
|
2. Add the current timestamp
|
||||||
3. Find current or next period based on time
|
3. Find current or next period based on time
|
||||||
4. Convert prices to display units based on user configuration
|
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
coordinator_data: Coordinator data dict
|
coordinator_data: Coordinator data dict
|
||||||
time: TibberPricesTimeService instance (required)
|
time: TibberPricesTimeService instance (required)
|
||||||
reverse_sort: True for peak_price (highest first), False for best_price (lowest first)
|
reverse_sort: True for peak_price (highest first), False for best_price (lowest first)
|
||||||
config_entry: Config entry for display unit configuration
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Attributes dict with current/next period and all periods list
|
Attributes dict with current/next period and all periods list
|
||||||
|
|
@ -110,20 +101,11 @@ def get_price_intervals_attributes(
|
||||||
if not period_summaries:
|
if not period_summaries:
|
||||||
return build_no_periods_result(time=time)
|
return build_no_periods_result(time=time)
|
||||||
|
|
||||||
# Filter periods for today+tomorrow (sensors don't show yesterday's periods)
|
|
||||||
# Coordinator cache contains yesterday/today/tomorrow, but sensors only need today+tomorrow
|
|
||||||
now = time.now()
|
|
||||||
today_start = time.start_of_local_day(now)
|
|
||||||
filtered_periods = [period for period in period_summaries if period.get("end") and period["end"] >= today_start]
|
|
||||||
|
|
||||||
if not filtered_periods:
|
|
||||||
return build_no_periods_result(time=time)
|
|
||||||
|
|
||||||
# Find current or next period based on current time
|
# Find current or next period based on current time
|
||||||
current_period = None
|
current_period = None
|
||||||
|
|
||||||
# First pass: find currently active period
|
# First pass: find currently active period
|
||||||
for period in filtered_periods:
|
for period in period_summaries:
|
||||||
start = period.get("start")
|
start = period.get("start")
|
||||||
end = period.get("end")
|
end = period.get("end")
|
||||||
if start and end and time.is_current_interval(start, end):
|
if start and end and time.is_current_interval(start, end):
|
||||||
|
|
@ -132,14 +114,14 @@ def get_price_intervals_attributes(
|
||||||
|
|
||||||
# Second pass: find next future period if none is active
|
# Second pass: find next future period if none is active
|
||||||
if not current_period:
|
if not current_period:
|
||||||
for period in filtered_periods:
|
for period in period_summaries:
|
||||||
start = period.get("start")
|
start = period.get("start")
|
||||||
if start and time.is_in_future(start):
|
if start and time.is_in_future(start):
|
||||||
current_period = period
|
current_period = period
|
||||||
break
|
break
|
||||||
|
|
||||||
# Build final attributes (use filtered_periods for display)
|
# Build final attributes
|
||||||
return build_final_attributes_simple(current_period, filtered_periods, time=time, config_entry=config_entry)
|
return build_final_attributes_simple(current_period, period_summaries, time=time)
|
||||||
|
|
||||||
|
|
||||||
def build_no_periods_result(*, time: TibberPricesTimeService) -> dict:
|
def build_no_periods_result(*, time: TibberPricesTimeService) -> dict:
|
||||||
|
|
@ -184,60 +166,26 @@ def add_decision_attributes(attributes: dict, current_period: dict) -> None:
|
||||||
attributes["rating_difference_%"] = current_period["rating_difference_%"]
|
attributes["rating_difference_%"] = current_period["rating_difference_%"]
|
||||||
|
|
||||||
|
|
||||||
def add_price_attributes(attributes: dict, current_period: dict, factor: int) -> None:
|
def add_price_attributes(attributes: dict, current_period: dict) -> None:
|
||||||
"""
|
"""Add price statistics attributes (priority 3)."""
|
||||||
Add price statistics attributes (priority 3).
|
if "price_avg" in current_period:
|
||||||
|
attributes["price_avg"] = current_period["price_avg"]
|
||||||
Args:
|
|
||||||
attributes: Target dict to add attributes to
|
|
||||||
current_period: Period dict with price data (in base currency)
|
|
||||||
factor: Display unit conversion factor (100 for subunit, 1 for base)
|
|
||||||
|
|
||||||
"""
|
|
||||||
# Convert prices from base currency to display units
|
|
||||||
precision = _SUBUNIT_PRECISION if factor == _SUBUNIT_FACTOR else _BASE_PRECISION
|
|
||||||
|
|
||||||
if "price_mean" in current_period:
|
|
||||||
attributes["price_mean"] = round(current_period["price_mean"] * factor, precision)
|
|
||||||
if "price_median" in current_period:
|
|
||||||
attributes["price_median"] = round(current_period["price_median"] * factor, precision)
|
|
||||||
if "price_min" in current_period:
|
if "price_min" in current_period:
|
||||||
attributes["price_min"] = round(current_period["price_min"] * factor, precision)
|
attributes["price_min"] = current_period["price_min"]
|
||||||
if "price_max" in current_period:
|
if "price_max" in current_period:
|
||||||
attributes["price_max"] = round(current_period["price_max"] * factor, precision)
|
attributes["price_max"] = current_period["price_max"]
|
||||||
if "price_spread" in current_period:
|
if "price_spread" in current_period:
|
||||||
attributes["price_spread"] = round(current_period["price_spread"] * factor, precision)
|
attributes["price_spread"] = current_period["price_spread"]
|
||||||
if "price_coefficient_variation_%" in current_period:
|
|
||||||
attributes["price_coefficient_variation_%"] = current_period["price_coefficient_variation_%"]
|
|
||||||
if "volatility" in current_period:
|
if "volatility" in current_period:
|
||||||
attributes["volatility"] = current_period["volatility"] # Volatility is not a price, keep as-is
|
attributes["volatility"] = current_period["volatility"]
|
||||||
|
|
||||||
|
|
||||||
def add_comparison_attributes(attributes: dict, current_period: dict, factor: int) -> None:
|
def add_comparison_attributes(attributes: dict, current_period: dict) -> None:
|
||||||
"""
|
"""Add price comparison attributes (priority 4)."""
|
||||||
Add price comparison attributes (priority 4).
|
|
||||||
|
|
||||||
Args:
|
|
||||||
attributes: Target dict to add attributes to
|
|
||||||
current_period: Period dict with price diff data (in base currency)
|
|
||||||
factor: Display unit conversion factor (100 for subunit, 1 for base)
|
|
||||||
|
|
||||||
"""
|
|
||||||
# Convert price differences from base currency to display units
|
|
||||||
precision = _SUBUNIT_PRECISION if factor == _SUBUNIT_FACTOR else _BASE_PRECISION
|
|
||||||
|
|
||||||
if "period_price_diff_from_daily_min" in current_period:
|
if "period_price_diff_from_daily_min" in current_period:
|
||||||
attributes["period_price_diff_from_daily_min"] = round(
|
attributes["period_price_diff_from_daily_min"] = current_period["period_price_diff_from_daily_min"]
|
||||||
current_period["period_price_diff_from_daily_min"] * factor, precision
|
|
||||||
)
|
|
||||||
if "period_price_diff_from_daily_min_%" in current_period:
|
if "period_price_diff_from_daily_min_%" in current_period:
|
||||||
attributes["period_price_diff_from_daily_min_%"] = current_period["period_price_diff_from_daily_min_%"]
|
attributes["period_price_diff_from_daily_min_%"] = current_period["period_price_diff_from_daily_min_%"]
|
||||||
if "period_price_diff_from_daily_max" in current_period:
|
|
||||||
attributes["period_price_diff_from_daily_max"] = round(
|
|
||||||
current_period["period_price_diff_from_daily_max"] * factor, precision
|
|
||||||
)
|
|
||||||
if "period_price_diff_from_daily_max_%" in current_period:
|
|
||||||
attributes["period_price_diff_from_daily_max_%"] = current_period["period_price_diff_from_daily_max_%"]
|
|
||||||
|
|
||||||
|
|
||||||
def add_detail_attributes(attributes: dict, current_period: dict) -> None:
|
def add_detail_attributes(attributes: dict, current_period: dict) -> None:
|
||||||
|
|
@ -269,51 +217,11 @@ def add_relaxation_attributes(attributes: dict, current_period: dict) -> None:
|
||||||
attributes["relaxation_threshold_applied_%"] = current_period["relaxation_threshold_applied_%"]
|
attributes["relaxation_threshold_applied_%"] = current_period["relaxation_threshold_applied_%"]
|
||||||
|
|
||||||
|
|
||||||
def _convert_periods_to_display_units(period_summaries: list[dict], factor: int) -> list[dict]:
|
|
||||||
"""
|
|
||||||
Convert price values in periods array to display units.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
period_summaries: List of period dicts with price data (in base currency)
|
|
||||||
factor: Display unit conversion factor (100 for subunit, 1 for base)
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
New list with converted period dicts
|
|
||||||
|
|
||||||
"""
|
|
||||||
precision = _SUBUNIT_PRECISION if factor == _SUBUNIT_FACTOR else _BASE_PRECISION
|
|
||||||
converted_periods = []
|
|
||||||
|
|
||||||
for period in period_summaries:
|
|
||||||
converted = period.copy()
|
|
||||||
|
|
||||||
# Convert all price fields
|
|
||||||
price_fields = ["price_mean", "price_median", "price_min", "price_max", "price_spread"]
|
|
||||||
for field in price_fields:
|
|
||||||
if field in converted:
|
|
||||||
converted[field] = round(converted[field] * factor, precision)
|
|
||||||
|
|
||||||
# Convert price differences (not percentages)
|
|
||||||
if "period_price_diff_from_daily_min" in converted:
|
|
||||||
converted["period_price_diff_from_daily_min"] = round(
|
|
||||||
converted["period_price_diff_from_daily_min"] * factor, precision
|
|
||||||
)
|
|
||||||
if "period_price_diff_from_daily_max" in converted:
|
|
||||||
converted["period_price_diff_from_daily_max"] = round(
|
|
||||||
converted["period_price_diff_from_daily_max"] * factor, precision
|
|
||||||
)
|
|
||||||
|
|
||||||
converted_periods.append(converted)
|
|
||||||
|
|
||||||
return converted_periods
|
|
||||||
|
|
||||||
|
|
||||||
def build_final_attributes_simple(
|
def build_final_attributes_simple(
|
||||||
current_period: dict | None,
|
current_period: dict | None,
|
||||||
period_summaries: list[dict],
|
period_summaries: list[dict],
|
||||||
*,
|
*,
|
||||||
time: TibberPricesTimeService,
|
time: TibberPricesTimeService,
|
||||||
config_entry: TibberPricesConfigEntry,
|
|
||||||
) -> dict:
|
) -> dict:
|
||||||
"""
|
"""
|
||||||
Build the final attributes dictionary from coordinator's period summaries.
|
Build the final attributes dictionary from coordinator's period summaries.
|
||||||
|
|
@ -322,12 +230,11 @@ def build_final_attributes_simple(
|
||||||
1. Adds the current timestamp (only thing calculated every 15min)
|
1. Adds the current timestamp (only thing calculated every 15min)
|
||||||
2. Uses the current/next period from summaries
|
2. Uses the current/next period from summaries
|
||||||
3. Adds nested period summaries
|
3. Adds nested period summaries
|
||||||
4. Converts prices to display units based on user configuration
|
|
||||||
|
|
||||||
Attributes are ordered following the documented priority:
|
Attributes are ordered following the documented priority:
|
||||||
1. Time information (timestamp, start, end, duration)
|
1. Time information (timestamp, start, end, duration)
|
||||||
2. Core decision attributes (level, rating_level, rating_difference_%)
|
2. Core decision attributes (level, rating_level, rating_difference_%)
|
||||||
3. Price statistics (price_mean, price_median, price_min, price_max, price_spread, volatility)
|
3. Price statistics (price_avg, price_min, price_max, price_spread, volatility)
|
||||||
4. Price differences (period_price_diff_from_daily_min, period_price_diff_from_daily_min_%)
|
4. Price differences (period_price_diff_from_daily_min, period_price_diff_from_daily_min_%)
|
||||||
5. Detail information (period_interval_count, period_position, periods_total, periods_remaining)
|
5. Detail information (period_interval_count, period_position, periods_total, periods_remaining)
|
||||||
6. Relaxation information (relaxation_active, relaxation_level, relaxation_threshold_original_%,
|
6. Relaxation information (relaxation_active, relaxation_level, relaxation_threshold_original_%,
|
||||||
|
|
@ -338,7 +245,6 @@ def build_final_attributes_simple(
|
||||||
current_period: The current or next period (already complete from coordinator)
|
current_period: The current or next period (already complete from coordinator)
|
||||||
period_summaries: All period summaries from coordinator
|
period_summaries: All period summaries from coordinator
|
||||||
time: TibberPricesTimeService instance (required)
|
time: TibberPricesTimeService instance (required)
|
||||||
config_entry: Config entry for display unit configuration
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Complete attributes dict with all fields
|
Complete attributes dict with all fields
|
||||||
|
|
@ -348,9 +254,6 @@ def build_final_attributes_simple(
|
||||||
current_minute = (now.minute // 15) * 15
|
current_minute = (now.minute // 15) * 15
|
||||||
timestamp = now.replace(minute=current_minute, second=0, microsecond=0)
|
timestamp = now.replace(minute=current_minute, second=0, microsecond=0)
|
||||||
|
|
||||||
# Get display unit factor (100 for subunit, 1 for base currency)
|
|
||||||
factor = get_display_unit_factor(config_entry)
|
|
||||||
|
|
||||||
if current_period:
|
if current_period:
|
||||||
# Build attributes in priority order using helper methods
|
# Build attributes in priority order using helper methods
|
||||||
attributes = {}
|
attributes = {}
|
||||||
|
|
@ -361,11 +264,11 @@ def build_final_attributes_simple(
|
||||||
# 2. Core decision attributes
|
# 2. Core decision attributes
|
||||||
add_decision_attributes(attributes, current_period)
|
add_decision_attributes(attributes, current_period)
|
||||||
|
|
||||||
# 3. Price statistics (converted to display units)
|
# 3. Price statistics
|
||||||
add_price_attributes(attributes, current_period, factor)
|
add_price_attributes(attributes, current_period)
|
||||||
|
|
||||||
# 4. Price differences (converted to display units)
|
# 4. Price differences
|
||||||
add_comparison_attributes(attributes, current_period, factor)
|
add_comparison_attributes(attributes, current_period)
|
||||||
|
|
||||||
# 5. Detail information
|
# 5. Detail information
|
||||||
add_detail_attributes(attributes, current_period)
|
add_detail_attributes(attributes, current_period)
|
||||||
|
|
@ -373,15 +276,15 @@ def build_final_attributes_simple(
|
||||||
# 6. Relaxation information (only if period was relaxed)
|
# 6. Relaxation information (only if period was relaxed)
|
||||||
add_relaxation_attributes(attributes, current_period)
|
add_relaxation_attributes(attributes, current_period)
|
||||||
|
|
||||||
# 7. Meta information (periods array - prices converted to display units)
|
# 7. Meta information (periods array)
|
||||||
attributes["periods"] = _convert_periods_to_display_units(period_summaries, factor)
|
attributes["periods"] = period_summaries
|
||||||
|
|
||||||
return attributes
|
return attributes
|
||||||
|
|
||||||
# No current/next period found - return all periods with timestamp (prices converted)
|
# No current/next period found - return all periods with timestamp
|
||||||
return {
|
return {
|
||||||
"timestamp": timestamp,
|
"timestamp": timestamp,
|
||||||
"periods": _convert_periods_to_display_units(period_summaries, factor),
|
"periods": period_summaries,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -15,7 +15,6 @@ from homeassistant.components.binary_sensor import (
|
||||||
)
|
)
|
||||||
from homeassistant.core import callback
|
from homeassistant.core import callback
|
||||||
from homeassistant.exceptions import ConfigEntryAuthFailed
|
from homeassistant.exceptions import ConfigEntryAuthFailed
|
||||||
from homeassistant.helpers.restore_state import RestoreEntity
|
|
||||||
|
|
||||||
from .attributes import (
|
from .attributes import (
|
||||||
build_async_extra_state_attributes,
|
build_async_extra_state_attributes,
|
||||||
|
|
@ -33,41 +32,8 @@ if TYPE_CHECKING:
|
||||||
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
||||||
|
|
||||||
|
|
||||||
class TibberPricesBinarySensor(TibberPricesEntity, BinarySensorEntity, RestoreEntity):
|
class TibberPricesBinarySensor(TibberPricesEntity, BinarySensorEntity):
|
||||||
"""tibber_prices binary_sensor class with state restoration."""
|
"""tibber_prices binary_sensor class."""
|
||||||
|
|
||||||
# Attributes excluded from recorder history
|
|
||||||
# See: https://developers.home-assistant.io/docs/core/entity/#excluding-state-attributes-from-recorder-history
|
|
||||||
_unrecorded_attributes = frozenset(
|
|
||||||
{
|
|
||||||
"timestamp",
|
|
||||||
# Descriptions/Help Text (static, large)
|
|
||||||
"description",
|
|
||||||
"usage_tips",
|
|
||||||
# Large Nested Structures
|
|
||||||
"periods", # Array of all period summaries
|
|
||||||
# Frequently Changing Diagnostics
|
|
||||||
"icon_color",
|
|
||||||
"data_status",
|
|
||||||
# Static/Rarely Changing
|
|
||||||
"level_value",
|
|
||||||
"rating_value",
|
|
||||||
"level_id",
|
|
||||||
"rating_id",
|
|
||||||
# Relaxation Details
|
|
||||||
"relaxation_level",
|
|
||||||
"relaxation_threshold_original_%",
|
|
||||||
"relaxation_threshold_applied_%",
|
|
||||||
# Redundant/Derived
|
|
||||||
"price_spread",
|
|
||||||
"volatility",
|
|
||||||
"rating_difference_%",
|
|
||||||
"period_price_diff_from_daily_min",
|
|
||||||
"period_price_diff_from_daily_min_%",
|
|
||||||
"periods_total",
|
|
||||||
"periods_remaining",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
|
|
@ -85,11 +51,6 @@ class TibberPricesBinarySensor(TibberPricesEntity, BinarySensorEntity, RestoreEn
|
||||||
"""When entity is added to hass."""
|
"""When entity is added to hass."""
|
||||||
await super().async_added_to_hass()
|
await super().async_added_to_hass()
|
||||||
|
|
||||||
# Restore last state if available
|
|
||||||
if (last_state := await self.async_get_last_state()) is not None and last_state.state in ("on", "off"):
|
|
||||||
# Restore binary state (on/off) - will be used until first coordinator update
|
|
||||||
self._attr_is_on = last_state.state == "on"
|
|
||||||
|
|
||||||
# Register with coordinator for time-sensitive updates if applicable
|
# Register with coordinator for time-sensitive updates if applicable
|
||||||
if self.entity_description.key in TIME_SENSITIVE_ENTITY_KEYS:
|
if self.entity_description.key in TIME_SENSITIVE_ENTITY_KEYS:
|
||||||
self._time_sensitive_remove_listener = self.coordinator.async_add_time_sensitive_listener(
|
self._time_sensitive_remove_listener = self.coordinator.async_add_time_sensitive_listener(
|
||||||
|
|
@ -138,12 +99,7 @@ class TibberPricesBinarySensor(TibberPricesEntity, BinarySensorEntity, RestoreEn
|
||||||
"""Return True if the current time is within a best price period."""
|
"""Return True if the current time is within a best price period."""
|
||||||
if not self.coordinator.data:
|
if not self.coordinator.data:
|
||||||
return None
|
return None
|
||||||
attrs = get_price_intervals_attributes(
|
attrs = get_price_intervals_attributes(self.coordinator.data, reverse_sort=False, time=self.coordinator.time)
|
||||||
self.coordinator.data,
|
|
||||||
reverse_sort=False,
|
|
||||||
time=self.coordinator.time,
|
|
||||||
config_entry=self.coordinator.config_entry,
|
|
||||||
)
|
|
||||||
if not attrs:
|
if not attrs:
|
||||||
return False # Should not happen, but safety fallback
|
return False # Should not happen, but safety fallback
|
||||||
start = attrs.get("start")
|
start = attrs.get("start")
|
||||||
|
|
@ -157,12 +113,7 @@ class TibberPricesBinarySensor(TibberPricesEntity, BinarySensorEntity, RestoreEn
|
||||||
"""Return True if the current time is within a peak price period."""
|
"""Return True if the current time is within a peak price period."""
|
||||||
if not self.coordinator.data:
|
if not self.coordinator.data:
|
||||||
return None
|
return None
|
||||||
attrs = get_price_intervals_attributes(
|
attrs = get_price_intervals_attributes(self.coordinator.data, reverse_sort=True, time=self.coordinator.time)
|
||||||
self.coordinator.data,
|
|
||||||
reverse_sort=True,
|
|
||||||
time=self.coordinator.time,
|
|
||||||
config_entry=self.coordinator.config_entry,
|
|
||||||
)
|
|
||||||
if not attrs:
|
if not attrs:
|
||||||
return False # Should not happen, but safety fallback
|
return False # Should not happen, but safety fallback
|
||||||
start = attrs.get("start")
|
start = attrs.get("start")
|
||||||
|
|
@ -197,31 +148,6 @@ class TibberPricesBinarySensor(TibberPricesEntity, BinarySensorEntity, RestoreEn
|
||||||
return False
|
return False
|
||||||
return False
|
return False
|
||||||
|
|
||||||
@property
|
|
||||||
def available(self) -> bool:
|
|
||||||
"""
|
|
||||||
Return if entity is available.
|
|
||||||
|
|
||||||
Override base implementation for connection sensor which should
|
|
||||||
always be available to show connection state.
|
|
||||||
"""
|
|
||||||
# Connection sensor is always available (shows connection state)
|
|
||||||
if self.entity_description.key == "connection":
|
|
||||||
return True
|
|
||||||
|
|
||||||
# All other binary sensors use base availability logic
|
|
||||||
return super().available
|
|
||||||
|
|
||||||
@property
|
|
||||||
def force_update(self) -> bool:
|
|
||||||
"""
|
|
||||||
Force update for connection sensor to record all state changes.
|
|
||||||
|
|
||||||
Connection sensor should write every state change to history,
|
|
||||||
even if the state (on/off) is the same, to track connectivity issues.
|
|
||||||
"""
|
|
||||||
return self.entity_description.key == "connection"
|
|
||||||
|
|
||||||
def _has_ventilation_system_state(self) -> bool | None:
|
def _has_ventilation_system_state(self) -> bool | None:
|
||||||
"""Return True if the home has a ventilation system."""
|
"""Return True if the home has a ventilation system."""
|
||||||
if not self.coordinator.data:
|
if not self.coordinator.data:
|
||||||
|
|
@ -280,19 +206,9 @@ class TibberPricesBinarySensor(TibberPricesEntity, BinarySensorEntity, RestoreEn
|
||||||
key = self.entity_description.key
|
key = self.entity_description.key
|
||||||
|
|
||||||
if key == "peak_price_period":
|
if key == "peak_price_period":
|
||||||
return get_price_intervals_attributes(
|
return get_price_intervals_attributes(self.coordinator.data, reverse_sort=True, time=self.coordinator.time)
|
||||||
self.coordinator.data,
|
|
||||||
reverse_sort=True,
|
|
||||||
time=self.coordinator.time,
|
|
||||||
config_entry=self.coordinator.config_entry,
|
|
||||||
)
|
|
||||||
if key == "best_price_period":
|
if key == "best_price_period":
|
||||||
return get_price_intervals_attributes(
|
return get_price_intervals_attributes(self.coordinator.data, reverse_sort=False, time=self.coordinator.time)
|
||||||
self.coordinator.data,
|
|
||||||
reverse_sort=False,
|
|
||||||
time=self.coordinator.time,
|
|
||||||
config_entry=self.coordinator.config_entry,
|
|
||||||
)
|
|
||||||
if key == "tomorrow_data_available":
|
if key == "tomorrow_data_available":
|
||||||
return self._get_tomorrow_data_available_attributes()
|
return self._get_tomorrow_data_available_attributes()
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -38,7 +38,6 @@ ENTITY_DESCRIPTIONS = (
|
||||||
icon="mdi:calendar-check",
|
icon="mdi:calendar-check",
|
||||||
device_class=None, # No specific device_class = shows generic "On/Off"
|
device_class=None, # No specific device_class = shows generic "On/Off"
|
||||||
entity_category=EntityCategory.DIAGNOSTIC,
|
entity_category=EntityCategory.DIAGNOSTIC,
|
||||||
entity_registry_enabled_default=True, # Critical for automations
|
|
||||||
),
|
),
|
||||||
BinarySensorEntityDescription(
|
BinarySensorEntityDescription(
|
||||||
key="has_ventilation_system",
|
key="has_ventilation_system",
|
||||||
|
|
|
||||||
|
|
@ -90,15 +90,14 @@ class PeriodSummary(TypedDict, total=False):
|
||||||
rating_difference_pct: float # Difference from daily average (%)
|
rating_difference_pct: float # Difference from daily average (%)
|
||||||
|
|
||||||
# Price statistics (priority 3)
|
# Price statistics (priority 3)
|
||||||
price_mean: float # Arithmetic mean price in period
|
price_avg: float # Average price in period (minor currency)
|
||||||
price_median: float # Median price in period
|
price_min: float # Minimum price in period (minor currency)
|
||||||
price_min: float # Minimum price in period
|
price_max: float # Maximum price in period (minor currency)
|
||||||
price_max: float # Maximum price in period
|
|
||||||
price_spread: float # Price spread (max - min)
|
price_spread: float # Price spread (max - min)
|
||||||
volatility: float # Price volatility within period
|
volatility: float # Price volatility within period
|
||||||
|
|
||||||
# Price comparison (priority 4)
|
# Price comparison (priority 4)
|
||||||
period_price_diff_from_daily_min: float # Difference from daily min
|
period_price_diff_from_daily_min: float # Difference from daily min (minor currency)
|
||||||
period_price_diff_from_daily_min_pct: float # Difference from daily min (%)
|
period_price_diff_from_daily_min_pct: float # Difference from daily min (%)
|
||||||
|
|
||||||
# Detail information (priority 5)
|
# Detail information (priority 5)
|
||||||
|
|
@ -123,7 +122,7 @@ class PeriodAttributes(BaseAttributes, total=False):
|
||||||
Attributes follow priority ordering:
|
Attributes follow priority ordering:
|
||||||
1. Time information (timestamp, start, end, duration_minutes)
|
1. Time information (timestamp, start, end, duration_minutes)
|
||||||
2. Core decision attributes (level, rating_level, rating_difference_%)
|
2. Core decision attributes (level, rating_level, rating_difference_%)
|
||||||
3. Price statistics (price_mean, price_median, price_min, price_max, price_spread, volatility)
|
3. Price statistics (price_avg, price_min, price_max, price_spread, volatility)
|
||||||
4. Price comparison (period_price_diff_from_daily_min, period_price_diff_from_daily_min_%)
|
4. Price comparison (period_price_diff_from_daily_min, period_price_diff_from_daily_min_%)
|
||||||
5. Detail information (period_interval_count, period_position, periods_total, periods_remaining)
|
5. Detail information (period_interval_count, period_position, periods_total, periods_remaining)
|
||||||
6. Relaxation information (only if period was relaxed)
|
6. Relaxation information (only if period was relaxed)
|
||||||
|
|
@ -141,15 +140,14 @@ class PeriodAttributes(BaseAttributes, total=False):
|
||||||
rating_difference_pct: float # Difference from daily average (%)
|
rating_difference_pct: float # Difference from daily average (%)
|
||||||
|
|
||||||
# Price statistics (priority 3)
|
# Price statistics (priority 3)
|
||||||
price_mean: float # Arithmetic mean price in current/next period
|
price_avg: float # Average price in current/next period (minor currency)
|
||||||
price_median: float # Median price in current/next period
|
price_min: float # Minimum price in current/next period (minor currency)
|
||||||
price_min: float # Minimum price in current/next period
|
price_max: float # Maximum price in current/next period (minor currency)
|
||||||
price_max: float # Maximum price in current/next period
|
|
||||||
price_spread: float # Price spread (max - min) in current/next period
|
price_spread: float # Price spread (max - min) in current/next period
|
||||||
volatility: float # Price volatility within current/next period
|
volatility: float # Price volatility within current/next period
|
||||||
|
|
||||||
# Price comparison (priority 4)
|
# Price comparison (priority 4)
|
||||||
period_price_diff_from_daily_min: float # Difference from daily min
|
period_price_diff_from_daily_min: float # Difference from daily min (minor currency)
|
||||||
period_price_diff_from_daily_min_pct: float # Difference from daily min (%)
|
period_price_diff_from_daily_min_pct: float # Difference from daily min (%)
|
||||||
|
|
||||||
# Detail information (priority 5)
|
# Detail information (priority 5)
|
||||||
|
|
|
||||||
|
|
@ -14,7 +14,6 @@ from .config_flow_handlers.schemas import (
|
||||||
get_best_price_schema,
|
get_best_price_schema,
|
||||||
get_options_init_schema,
|
get_options_init_schema,
|
||||||
get_peak_price_schema,
|
get_peak_price_schema,
|
||||||
get_price_level_schema,
|
|
||||||
get_price_rating_schema,
|
get_price_rating_schema,
|
||||||
get_price_trend_schema,
|
get_price_trend_schema,
|
||||||
get_reauth_confirm_schema,
|
get_reauth_confirm_schema,
|
||||||
|
|
@ -42,7 +41,6 @@ __all__ = [
|
||||||
"get_best_price_schema",
|
"get_best_price_schema",
|
||||||
"get_options_init_schema",
|
"get_options_init_schema",
|
||||||
"get_peak_price_schema",
|
"get_peak_price_schema",
|
||||||
"get_price_level_schema",
|
|
||||||
"get_price_rating_schema",
|
"get_price_rating_schema",
|
||||||
"get_price_trend_schema",
|
"get_price_trend_schema",
|
||||||
"get_reauth_confirm_schema",
|
"get_reauth_confirm_schema",
|
||||||
|
|
|
||||||
|
|
@ -27,7 +27,6 @@ from custom_components.tibber_prices.config_flow_handlers.schemas import (
|
||||||
get_best_price_schema,
|
get_best_price_schema,
|
||||||
get_options_init_schema,
|
get_options_init_schema,
|
||||||
get_peak_price_schema,
|
get_peak_price_schema,
|
||||||
get_price_level_schema,
|
|
||||||
get_price_rating_schema,
|
get_price_rating_schema,
|
||||||
get_price_trend_schema,
|
get_price_trend_schema,
|
||||||
get_reauth_confirm_schema,
|
get_reauth_confirm_schema,
|
||||||
|
|
@ -57,7 +56,6 @@ __all__ = [
|
||||||
"get_best_price_schema",
|
"get_best_price_schema",
|
||||||
"get_options_init_schema",
|
"get_options_init_schema",
|
||||||
"get_peak_price_schema",
|
"get_peak_price_schema",
|
||||||
"get_price_level_schema",
|
|
||||||
"get_price_rating_schema",
|
"get_price_rating_schema",
|
||||||
"get_price_trend_schema",
|
"get_price_trend_schema",
|
||||||
"get_reauth_confirm_schema",
|
"get_reauth_confirm_schema",
|
||||||
|
|
|
||||||
|
|
@ -1,243 +0,0 @@
|
||||||
"""
|
|
||||||
Entity check utilities for options flow.
|
|
||||||
|
|
||||||
This module provides functions to check if relevant entities are enabled
|
|
||||||
for specific options flow steps. If no relevant entities are enabled,
|
|
||||||
a warning can be displayed to users.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
import logging
|
|
||||||
from typing import TYPE_CHECKING
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import DOMAIN
|
|
||||||
from homeassistant.helpers.entity_registry import async_get as async_get_entity_registry
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
from homeassistant.config_entries import ConfigEntry
|
|
||||||
from homeassistant.core import HomeAssistant
|
|
||||||
|
|
||||||
_LOGGER = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
# Maximum number of example sensors to show in warning message
|
|
||||||
MAX_EXAMPLE_SENSORS = 3
|
|
||||||
# Threshold for using "and" vs "," in formatted names
|
|
||||||
NAMES_SIMPLE_JOIN_THRESHOLD = 2
|
|
||||||
|
|
||||||
# Mapping of options flow steps to affected sensor keys
|
|
||||||
# These are the entity keys (from sensor/definitions.py and binary_sensor/definitions.py)
|
|
||||||
# that are affected by each settings page
|
|
||||||
STEP_TO_SENSOR_KEYS: dict[str, list[str]] = {
|
|
||||||
# Price Rating settings affect all rating sensors
|
|
||||||
"current_interval_price_rating": [
|
|
||||||
# Interval rating sensors
|
|
||||||
"current_interval_price_rating",
|
|
||||||
"next_interval_price_rating",
|
|
||||||
"previous_interval_price_rating",
|
|
||||||
# Rolling hour rating sensors
|
|
||||||
"current_hour_price_rating",
|
|
||||||
"next_hour_price_rating",
|
|
||||||
# Daily rating sensors
|
|
||||||
"yesterday_price_rating",
|
|
||||||
"today_price_rating",
|
|
||||||
"tomorrow_price_rating",
|
|
||||||
],
|
|
||||||
# Price Level settings affect level sensors and period binary sensors
|
|
||||||
"price_level": [
|
|
||||||
# Interval level sensors
|
|
||||||
"current_interval_price_level",
|
|
||||||
"next_interval_price_level",
|
|
||||||
"previous_interval_price_level",
|
|
||||||
# Rolling hour level sensors
|
|
||||||
"current_hour_price_level",
|
|
||||||
"next_hour_price_level",
|
|
||||||
# Daily level sensors
|
|
||||||
"yesterday_price_level",
|
|
||||||
"today_price_level",
|
|
||||||
"tomorrow_price_level",
|
|
||||||
# Binary sensors that use level filtering
|
|
||||||
"best_price_period",
|
|
||||||
"peak_price_period",
|
|
||||||
],
|
|
||||||
# Volatility settings affect volatility sensors
|
|
||||||
"volatility": [
|
|
||||||
"today_volatility",
|
|
||||||
"tomorrow_volatility",
|
|
||||||
"next_24h_volatility",
|
|
||||||
"today_tomorrow_volatility",
|
|
||||||
# Also affects trend sensors (adaptive thresholds)
|
|
||||||
"current_price_trend",
|
|
||||||
"next_price_trend_change",
|
|
||||||
"price_trend_1h",
|
|
||||||
"price_trend_2h",
|
|
||||||
"price_trend_3h",
|
|
||||||
"price_trend_4h",
|
|
||||||
"price_trend_5h",
|
|
||||||
"price_trend_6h",
|
|
||||||
"price_trend_8h",
|
|
||||||
"price_trend_12h",
|
|
||||||
],
|
|
||||||
# Best Price settings affect best price binary sensor and timing sensors
|
|
||||||
"best_price": [
|
|
||||||
# Binary sensor
|
|
||||||
"best_price_period",
|
|
||||||
# Timing sensors
|
|
||||||
"best_price_end_time",
|
|
||||||
"best_price_period_duration",
|
|
||||||
"best_price_remaining_minutes",
|
|
||||||
"best_price_progress",
|
|
||||||
"best_price_next_start_time",
|
|
||||||
"best_price_next_in_minutes",
|
|
||||||
],
|
|
||||||
# Peak Price settings affect peak price binary sensor and timing sensors
|
|
||||||
"peak_price": [
|
|
||||||
# Binary sensor
|
|
||||||
"peak_price_period",
|
|
||||||
# Timing sensors
|
|
||||||
"peak_price_end_time",
|
|
||||||
"peak_price_period_duration",
|
|
||||||
"peak_price_remaining_minutes",
|
|
||||||
"peak_price_progress",
|
|
||||||
"peak_price_next_start_time",
|
|
||||||
"peak_price_next_in_minutes",
|
|
||||||
],
|
|
||||||
# Price Trend settings affect trend sensors
|
|
||||||
"price_trend": [
|
|
||||||
"current_price_trend",
|
|
||||||
"next_price_trend_change",
|
|
||||||
"price_trend_1h",
|
|
||||||
"price_trend_2h",
|
|
||||||
"price_trend_3h",
|
|
||||||
"price_trend_4h",
|
|
||||||
"price_trend_5h",
|
|
||||||
"price_trend_6h",
|
|
||||||
"price_trend_8h",
|
|
||||||
"price_trend_12h",
|
|
||||||
],
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def check_relevant_entities_enabled(
|
|
||||||
hass: HomeAssistant,
|
|
||||||
config_entry: ConfigEntry,
|
|
||||||
step_id: str,
|
|
||||||
) -> tuple[bool, list[str]]:
|
|
||||||
"""
|
|
||||||
Check if any relevant entities for a settings step are enabled.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
hass: Home Assistant instance
|
|
||||||
config_entry: Current config entry
|
|
||||||
step_id: The options flow step ID
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Tuple of (has_enabled_entities, list_of_example_sensor_names)
|
|
||||||
- has_enabled_entities: True if at least one relevant entity is enabled
|
|
||||||
- list_of_example_sensor_names: List of example sensor keys for the warning message
|
|
||||||
|
|
||||||
"""
|
|
||||||
sensor_keys = STEP_TO_SENSOR_KEYS.get(step_id)
|
|
||||||
if not sensor_keys:
|
|
||||||
# No mapping for this step - no check needed
|
|
||||||
return True, []
|
|
||||||
|
|
||||||
entity_registry = async_get_entity_registry(hass)
|
|
||||||
entry_id = config_entry.entry_id
|
|
||||||
|
|
||||||
enabled_count = 0
|
|
||||||
example_sensors: list[str] = []
|
|
||||||
|
|
||||||
for entity in entity_registry.entities.values():
|
|
||||||
# Check if entity belongs to our integration and config entry
|
|
||||||
if entity.config_entry_id != entry_id:
|
|
||||||
continue
|
|
||||||
if entity.platform != DOMAIN:
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Extract the sensor key from unique_id
|
|
||||||
# unique_id format: "{home_id}_{sensor_key}" or "{entry_id}_{sensor_key}"
|
|
||||||
unique_id = entity.unique_id or ""
|
|
||||||
# The sensor key is after the last underscore that separates the ID prefix
|
|
||||||
# We check if any of our target keys is contained in the unique_id
|
|
||||||
for sensor_key in sensor_keys:
|
|
||||||
if unique_id.endswith(f"_{sensor_key}") or unique_id == sensor_key:
|
|
||||||
# Found a matching entity
|
|
||||||
if entity.disabled_by is None:
|
|
||||||
# Entity is enabled
|
|
||||||
enabled_count += 1
|
|
||||||
break
|
|
||||||
# Entity is disabled - add to examples (max MAX_EXAMPLE_SENSORS)
|
|
||||||
if len(example_sensors) < MAX_EXAMPLE_SENSORS and sensor_key not in example_sensors:
|
|
||||||
example_sensors.append(sensor_key)
|
|
||||||
break
|
|
||||||
|
|
||||||
# If we found enabled entities, return success
|
|
||||||
if enabled_count > 0:
|
|
||||||
return True, []
|
|
||||||
|
|
||||||
# No enabled entities - return the example sensors for the warning
|
|
||||||
# If we haven't collected any examples yet, use the first from the mapping
|
|
||||||
if not example_sensors:
|
|
||||||
example_sensors = sensor_keys[:MAX_EXAMPLE_SENSORS]
|
|
||||||
|
|
||||||
return False, example_sensors
|
|
||||||
|
|
||||||
|
|
||||||
def format_sensor_names_for_warning(sensor_keys: list[str]) -> str:
|
|
||||||
"""
|
|
||||||
Format sensor keys into human-readable names for warning message.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
sensor_keys: List of sensor keys
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Formatted string like "Best Price Period, Best Price End Time, ..."
|
|
||||||
|
|
||||||
"""
|
|
||||||
# Convert snake_case keys to Title Case names
|
|
||||||
names = []
|
|
||||||
for key in sensor_keys:
|
|
||||||
# Replace underscores with spaces and title case
|
|
||||||
name = key.replace("_", " ").title()
|
|
||||||
names.append(name)
|
|
||||||
|
|
||||||
if len(names) <= NAMES_SIMPLE_JOIN_THRESHOLD:
|
|
||||||
return " and ".join(names)
|
|
||||||
|
|
||||||
return ", ".join(names[:-1]) + ", and " + names[-1]
|
|
||||||
|
|
||||||
|
|
||||||
def check_chart_data_export_enabled(
|
|
||||||
hass: HomeAssistant,
|
|
||||||
config_entry: ConfigEntry,
|
|
||||||
) -> bool:
|
|
||||||
"""
|
|
||||||
Check if the Chart Data Export sensor is enabled.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
hass: Home Assistant instance
|
|
||||||
config_entry: Current config entry
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
True if the Chart Data Export sensor is enabled, False otherwise
|
|
||||||
|
|
||||||
"""
|
|
||||||
entity_registry = async_get_entity_registry(hass)
|
|
||||||
entry_id = config_entry.entry_id
|
|
||||||
|
|
||||||
for entity in entity_registry.entities.values():
|
|
||||||
# Check if entity belongs to our integration and config entry
|
|
||||||
if entity.config_entry_id != entry_id:
|
|
||||||
continue
|
|
||||||
if entity.platform != DOMAIN:
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Check for chart_data_export sensor
|
|
||||||
unique_id = entity.unique_id or ""
|
|
||||||
if unique_id.endswith("_chart_data_export") or unique_id == "chart_data_export":
|
|
||||||
# Found the entity - check if enabled
|
|
||||||
return entity.disabled_by is None
|
|
||||||
|
|
||||||
# Entity not found (shouldn't happen, but treat as disabled)
|
|
||||||
return False
|
|
||||||
|
|
@ -3,28 +3,18 @@
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
from copy import deepcopy
|
from typing import TYPE_CHECKING, Any, ClassVar
|
||||||
from typing import TYPE_CHECKING, Any
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from collections.abc import Mapping
|
from collections.abc import Mapping
|
||||||
|
|
||||||
from custom_components.tibber_prices.config_flow_handlers.entity_check import (
|
|
||||||
check_chart_data_export_enabled,
|
|
||||||
check_relevant_entities_enabled,
|
|
||||||
format_sensor_names_for_warning,
|
|
||||||
)
|
|
||||||
from custom_components.tibber_prices.config_flow_handlers.schemas import (
|
from custom_components.tibber_prices.config_flow_handlers.schemas import (
|
||||||
ConfigOverrides,
|
|
||||||
get_best_price_schema,
|
get_best_price_schema,
|
||||||
get_chart_data_export_schema,
|
get_chart_data_export_schema,
|
||||||
get_display_settings_schema,
|
|
||||||
get_options_init_schema,
|
get_options_init_schema,
|
||||||
get_peak_price_schema,
|
get_peak_price_schema,
|
||||||
get_price_level_schema,
|
|
||||||
get_price_rating_schema,
|
get_price_rating_schema,
|
||||||
get_price_trend_schema,
|
get_price_trend_schema,
|
||||||
get_reset_to_defaults_schema,
|
|
||||||
get_volatility_schema,
|
get_volatility_schema,
|
||||||
)
|
)
|
||||||
from custom_components.tibber_prices.config_flow_handlers.validators import (
|
from custom_components.tibber_prices.config_flow_handlers.validators import (
|
||||||
|
|
@ -39,8 +29,6 @@ from custom_components.tibber_prices.config_flow_handlers.validators import (
|
||||||
validate_price_rating_thresholds,
|
validate_price_rating_thresholds,
|
||||||
validate_price_trend_falling,
|
validate_price_trend_falling,
|
||||||
validate_price_trend_rising,
|
validate_price_trend_rising,
|
||||||
validate_price_trend_strongly_falling,
|
|
||||||
validate_price_trend_strongly_rising,
|
|
||||||
validate_relaxation_attempts,
|
validate_relaxation_attempts,
|
||||||
validate_volatility_threshold_high,
|
validate_volatility_threshold_high,
|
||||||
validate_volatility_threshold_moderate,
|
validate_volatility_threshold_moderate,
|
||||||
|
|
@ -62,8 +50,6 @@ from custom_components.tibber_prices.const import (
|
||||||
CONF_PRICE_RATING_THRESHOLD_LOW,
|
CONF_PRICE_RATING_THRESHOLD_LOW,
|
||||||
CONF_PRICE_TREND_THRESHOLD_FALLING,
|
CONF_PRICE_TREND_THRESHOLD_FALLING,
|
||||||
CONF_PRICE_TREND_THRESHOLD_RISING,
|
CONF_PRICE_TREND_THRESHOLD_RISING,
|
||||||
CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING,
|
|
||||||
CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING,
|
|
||||||
CONF_RELAXATION_ATTEMPTS_BEST,
|
CONF_RELAXATION_ATTEMPTS_BEST,
|
||||||
CONF_RELAXATION_ATTEMPTS_PEAK,
|
CONF_RELAXATION_ATTEMPTS_PEAK,
|
||||||
CONF_VOLATILITY_THRESHOLD_HIGH,
|
CONF_VOLATILITY_THRESHOLD_HIGH,
|
||||||
|
|
@ -73,11 +59,8 @@ from custom_components.tibber_prices.const import (
|
||||||
DEFAULT_VOLATILITY_THRESHOLD_MODERATE,
|
DEFAULT_VOLATILITY_THRESHOLD_MODERATE,
|
||||||
DEFAULT_VOLATILITY_THRESHOLD_VERY_HIGH,
|
DEFAULT_VOLATILITY_THRESHOLD_VERY_HIGH,
|
||||||
DOMAIN,
|
DOMAIN,
|
||||||
async_get_translation,
|
|
||||||
get_default_options,
|
|
||||||
)
|
)
|
||||||
from homeassistant.config_entries import ConfigFlowResult, OptionsFlow
|
from homeassistant.config_entries import ConfigFlowResult, OptionsFlow
|
||||||
from homeassistant.helpers import entity_registry as er
|
|
||||||
|
|
||||||
_LOGGER = logging.getLogger(__name__)
|
_LOGGER = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
@ -85,34 +68,22 @@ _LOGGER = logging.getLogger(__name__)
|
||||||
class TibberPricesOptionsFlowHandler(OptionsFlow):
|
class TibberPricesOptionsFlowHandler(OptionsFlow):
|
||||||
"""Handle options for tibber_prices entries."""
|
"""Handle options for tibber_prices entries."""
|
||||||
|
|
||||||
|
# Step progress tracking
|
||||||
|
_TOTAL_STEPS: ClassVar[int] = 7
|
||||||
|
_STEP_INFO: ClassVar[dict[str, int]] = {
|
||||||
|
"init": 1,
|
||||||
|
"current_interval_price_rating": 2,
|
||||||
|
"volatility": 3,
|
||||||
|
"best_price": 4,
|
||||||
|
"peak_price": 5,
|
||||||
|
"price_trend": 6,
|
||||||
|
"chart_data_export": 7,
|
||||||
|
}
|
||||||
|
|
||||||
def __init__(self) -> None:
|
def __init__(self) -> None:
|
||||||
"""Initialize options flow."""
|
"""Initialize options flow."""
|
||||||
self._options: dict[str, Any] = {}
|
self._options: dict[str, Any] = {}
|
||||||
|
|
||||||
def _merge_section_data(self, user_input: dict[str, Any]) -> None:
|
|
||||||
"""
|
|
||||||
Merge section data from form input into options.
|
|
||||||
|
|
||||||
Home Assistant forms with section() return nested dicts like:
|
|
||||||
{"section_name": {"setting1": value1, "setting2": value2}}
|
|
||||||
|
|
||||||
We need to preserve this structure in config_entry.options.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
user_input: Nested user input from form with sections
|
|
||||||
|
|
||||||
"""
|
|
||||||
for section_key, section_data in user_input.items():
|
|
||||||
if isinstance(section_data, dict):
|
|
||||||
# This is a section - ensure the section exists in options
|
|
||||||
if section_key not in self._options:
|
|
||||||
self._options[section_key] = {}
|
|
||||||
# Update the section with new values
|
|
||||||
self._options[section_key].update(section_data)
|
|
||||||
else:
|
|
||||||
# This is a direct value - keep it as is
|
|
||||||
self._options[section_key] = section_data
|
|
||||||
|
|
||||||
def _migrate_config_options(self, options: Mapping[str, Any]) -> dict[str, Any]:
|
def _migrate_config_options(self, options: Mapping[str, Any]) -> dict[str, Any]:
|
||||||
"""
|
"""
|
||||||
Migrate deprecated config options to current format.
|
Migrate deprecated config options to current format.
|
||||||
|
|
@ -127,10 +98,7 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
||||||
Migrated options dict with deprecated keys removed/renamed
|
Migrated options dict with deprecated keys removed/renamed
|
||||||
|
|
||||||
"""
|
"""
|
||||||
# CRITICAL: Use deepcopy to avoid modifying the original config_entry.options
|
migrated = dict(options)
|
||||||
# If we use dict(options), nested dicts are still referenced, causing
|
|
||||||
# self._options modifications to leak into config_entry.options
|
|
||||||
migrated = deepcopy(dict(options))
|
|
||||||
migration_performed = False
|
migration_performed = False
|
||||||
|
|
||||||
# Migration 1: Rename relaxation_step_* to relaxation_attempts_*
|
# Migration 1: Rename relaxation_step_* to relaxation_attempts_*
|
||||||
|
|
@ -174,341 +142,45 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
||||||
|
|
||||||
return migrated
|
return migrated
|
||||||
|
|
||||||
def _save_options_if_changed(self) -> bool:
|
def _get_step_description_placeholders(self, step_id: str) -> dict[str, str]:
|
||||||
"""
|
"""Get description placeholders with step progress."""
|
||||||
Save options only if they actually changed.
|
if step_id not in self._STEP_INFO:
|
||||||
|
|
||||||
Returns:
|
|
||||||
True if options were updated, False if no changes detected
|
|
||||||
|
|
||||||
"""
|
|
||||||
# Compare old and new options
|
|
||||||
if self.config_entry.options != self._options:
|
|
||||||
self.hass.config_entries.async_update_entry(
|
|
||||||
self.config_entry,
|
|
||||||
options=self._options,
|
|
||||||
)
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
def _get_entity_warning_placeholders(self, step_id: str) -> dict[str, str]:
|
|
||||||
"""
|
|
||||||
Get description placeholders for entity availability warning.
|
|
||||||
|
|
||||||
Checks if any relevant entities for the step are enabled.
|
|
||||||
If not, adds a warning placeholder to display in the form description.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
step_id: The options flow step ID
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Dictionary with placeholder keys for the form description
|
|
||||||
|
|
||||||
"""
|
|
||||||
has_enabled, example_sensors = check_relevant_entities_enabled(self.hass, self.config_entry, step_id)
|
|
||||||
|
|
||||||
if has_enabled:
|
|
||||||
# No warning needed - return empty placeholder
|
|
||||||
return {"entity_warning": ""}
|
|
||||||
|
|
||||||
# Build warning message with example sensor names
|
|
||||||
sensor_names = format_sensor_names_for_warning(example_sensors)
|
|
||||||
return {
|
|
||||||
"entity_warning": f"\n\n⚠️ **Note:** No sensors affected by these settings are currently enabled. "
|
|
||||||
f"To use these settings, first enable relevant sensors like *{sensor_names}* "
|
|
||||||
f"in **Settings → Devices & Services → Tibber Prices → Entities**."
|
|
||||||
}
|
|
||||||
|
|
||||||
def _get_enabled_config_entities(self) -> set[str]:
|
|
||||||
"""
|
|
||||||
Get config keys that have their config entity enabled.
|
|
||||||
|
|
||||||
Checks the entity registry for number/switch entities that override
|
|
||||||
config values. Returns the config_key for each enabled entity.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Set of config keys (e.g., "best_price_flex", "enable_min_periods_best")
|
|
||||||
|
|
||||||
"""
|
|
||||||
enabled_keys: set[str] = set()
|
|
||||||
ent_reg = er.async_get(self.hass)
|
|
||||||
|
|
||||||
_LOGGER.debug(
|
|
||||||
"Checking for enabled config override entities for entry %s",
|
|
||||||
self.config_entry.entry_id,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Map entity keys to their config keys
|
|
||||||
# Entity keys are defined in number/definitions.py and switch/definitions.py
|
|
||||||
override_entities = {
|
|
||||||
# Number entities (best price)
|
|
||||||
"number.best_price_flex_override": "best_price_flex",
|
|
||||||
"number.best_price_min_distance_override": "best_price_min_distance_from_avg",
|
|
||||||
"number.best_price_min_period_length_override": "best_price_min_period_length",
|
|
||||||
"number.best_price_min_periods_override": "min_periods_best",
|
|
||||||
"number.best_price_relaxation_attempts_override": "relaxation_attempts_best",
|
|
||||||
"number.best_price_gap_count_override": "best_price_max_level_gap_count",
|
|
||||||
# Number entities (peak price)
|
|
||||||
"number.peak_price_flex_override": "peak_price_flex",
|
|
||||||
"number.peak_price_min_distance_override": "peak_price_min_distance_from_avg",
|
|
||||||
"number.peak_price_min_period_length_override": "peak_price_min_period_length",
|
|
||||||
"number.peak_price_min_periods_override": "min_periods_peak",
|
|
||||||
"number.peak_price_relaxation_attempts_override": "relaxation_attempts_peak",
|
|
||||||
"number.peak_price_gap_count_override": "peak_price_max_level_gap_count",
|
|
||||||
# Switch entities
|
|
||||||
"switch.best_price_enable_relaxation_override": "enable_min_periods_best",
|
|
||||||
"switch.peak_price_enable_relaxation_override": "enable_min_periods_peak",
|
|
||||||
}
|
|
||||||
|
|
||||||
# Check each possible override entity
|
|
||||||
for entity_id_suffix, config_key in override_entities.items():
|
|
||||||
# Entity IDs include device name, so we need to search by unique_id pattern
|
|
||||||
# The unique_id follows pattern: {config_entry_id}_{entity_key}
|
|
||||||
domain, entity_key = entity_id_suffix.split(".", 1)
|
|
||||||
|
|
||||||
# Find entity by iterating through registry
|
|
||||||
for entity_entry in ent_reg.entities.values():
|
|
||||||
if (
|
|
||||||
entity_entry.domain == domain
|
|
||||||
and entity_entry.config_entry_id == self.config_entry.entry_id
|
|
||||||
and entity_entry.unique_id
|
|
||||||
and entity_entry.unique_id.endswith(entity_key)
|
|
||||||
and not entity_entry.disabled
|
|
||||||
):
|
|
||||||
_LOGGER.debug(
|
|
||||||
"Found enabled config override entity: %s -> config_key=%s",
|
|
||||||
entity_entry.entity_id,
|
|
||||||
config_key,
|
|
||||||
)
|
|
||||||
enabled_keys.add(config_key)
|
|
||||||
break
|
|
||||||
|
|
||||||
_LOGGER.debug("Enabled config override keys: %s", enabled_keys)
|
|
||||||
return enabled_keys
|
|
||||||
|
|
||||||
def _get_active_overrides(self) -> ConfigOverrides:
|
|
||||||
"""
|
|
||||||
Build override dict from enabled config entities.
|
|
||||||
|
|
||||||
Returns a dict structure compatible with schema functions.
|
|
||||||
"""
|
|
||||||
enabled_keys = self._get_enabled_config_entities()
|
|
||||||
if not enabled_keys:
|
|
||||||
_LOGGER.debug("No enabled config override entities found")
|
|
||||||
return {}
|
return {}
|
||||||
|
|
||||||
# Build structure expected by schema: {section: {key: True}}
|
step_num = self._STEP_INFO[step_id]
|
||||||
# Section doesn't matter for read_only check, we just need the key present
|
|
||||||
overrides: ConfigOverrides = {"_enabled": {}}
|
|
||||||
for key in enabled_keys:
|
|
||||||
overrides["_enabled"][key] = True
|
|
||||||
|
|
||||||
_LOGGER.debug("Active overrides structure: %s", overrides)
|
# Get translations loaded by Home Assistant
|
||||||
return overrides
|
standard_translations_key = f"{DOMAIN}_standard_translations_{self.hass.config.language}"
|
||||||
|
translations = self.hass.data.get(standard_translations_key, {})
|
||||||
|
|
||||||
def _get_override_warning_placeholder(self, step_id: str, overrides: ConfigOverrides) -> dict[str, str]:
|
# Get step progress text from translations with placeholders
|
||||||
"""
|
step_progress_template = translations.get("common", {}).get("step_progress", "Step {step_num} of {total_steps}")
|
||||||
Get description placeholder for config override warning.
|
step_progress = step_progress_template.format(step_num=step_num, total_steps=self._TOTAL_STEPS)
|
||||||
|
|
||||||
Args:
|
return {
|
||||||
step_id: The options flow step ID (e.g., "best_price", "peak_price")
|
"step_progress": step_progress,
|
||||||
overrides: Active overrides dictionary
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Dictionary with 'override_warning' placeholder
|
|
||||||
|
|
||||||
"""
|
|
||||||
# Define which config keys belong to each step
|
|
||||||
step_keys: dict[str, set[str]] = {
|
|
||||||
"best_price": {
|
|
||||||
"best_price_flex",
|
|
||||||
"best_price_min_distance_from_avg",
|
|
||||||
"best_price_min_period_length",
|
|
||||||
"min_periods_best",
|
|
||||||
"relaxation_attempts_best",
|
|
||||||
"enable_min_periods_best",
|
|
||||||
},
|
|
||||||
"peak_price": {
|
|
||||||
"peak_price_flex",
|
|
||||||
"peak_price_min_distance_from_avg",
|
|
||||||
"peak_price_min_period_length",
|
|
||||||
"min_periods_peak",
|
|
||||||
"relaxation_attempts_peak",
|
|
||||||
"enable_min_periods_peak",
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
keys_to_check = step_keys.get(step_id, set())
|
async def async_step_init(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
||||||
enabled_keys = overrides.get("_enabled", {})
|
"""Manage the options - General Settings."""
|
||||||
override_count = sum(1 for k in enabled_keys if k in keys_to_check)
|
# Initialize options from config_entry on first call
|
||||||
|
if not self._options:
|
||||||
|
# Migrate deprecated config options before processing
|
||||||
|
self._options = self._migrate_config_options(self.config_entry.options)
|
||||||
|
|
||||||
if override_count > 0:
|
|
||||||
field_word = "field is" if override_count == 1 else "fields are"
|
|
||||||
return {
|
|
||||||
"override_warning": (
|
|
||||||
f"\n\n🔒 **{override_count} {field_word} managed by configuration entities** "
|
|
||||||
"(grayed out). Disable the config entity to edit here, "
|
|
||||||
"or change the value directly via the entity."
|
|
||||||
)
|
|
||||||
}
|
|
||||||
return {"override_warning": ""}
|
|
||||||
|
|
||||||
async def _get_override_translations(self) -> dict[str, Any]:
|
|
||||||
"""
|
|
||||||
Load override translations from common section.
|
|
||||||
|
|
||||||
Uses the system language setting from Home Assistant.
|
|
||||||
Note: HA Options Flow does not provide user_id in context,
|
|
||||||
so we cannot determine the individual user's language preference.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Dictionary with override_warning_template, override_warning_and,
|
|
||||||
and override_field_label_* keys for each config field.
|
|
||||||
|
|
||||||
"""
|
|
||||||
# Use system language - HA Options Flow context doesn't include user_id
|
|
||||||
language = self.hass.config.language or "en"
|
|
||||||
_LOGGER.debug("Loading override translations for language: %s", language)
|
|
||||||
translations: dict[str, Any] = {}
|
|
||||||
|
|
||||||
# Load template and connector from common section
|
|
||||||
template = await async_get_translation(self.hass, ["common", "override_warning_template"], language)
|
|
||||||
_LOGGER.debug("Loaded template: %s", template)
|
|
||||||
if template:
|
|
||||||
translations["override_warning_template"] = template
|
|
||||||
|
|
||||||
and_connector = await async_get_translation(self.hass, ["common", "override_warning_and"], language)
|
|
||||||
if and_connector:
|
|
||||||
translations["override_warning_and"] = and_connector
|
|
||||||
|
|
||||||
# Load flat field label translations
|
|
||||||
field_keys = [
|
|
||||||
"best_price_min_period_length",
|
|
||||||
"best_price_max_level_gap_count",
|
|
||||||
"best_price_flex",
|
|
||||||
"best_price_min_distance_from_avg",
|
|
||||||
"enable_min_periods_best",
|
|
||||||
"min_periods_best",
|
|
||||||
"relaxation_attempts_best",
|
|
||||||
"peak_price_min_period_length",
|
|
||||||
"peak_price_max_level_gap_count",
|
|
||||||
"peak_price_flex",
|
|
||||||
"peak_price_min_distance_from_avg",
|
|
||||||
"enable_min_periods_peak",
|
|
||||||
"min_periods_peak",
|
|
||||||
"relaxation_attempts_peak",
|
|
||||||
]
|
|
||||||
for field_key in field_keys:
|
|
||||||
translation_key = f"override_field_label_{field_key}"
|
|
||||||
label = await async_get_translation(self.hass, ["common", translation_key], language)
|
|
||||||
if label:
|
|
||||||
translations[translation_key] = label
|
|
||||||
|
|
||||||
return translations
|
|
||||||
|
|
||||||
async def async_step_init(self, _user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
|
||||||
"""Manage the options - show menu."""
|
|
||||||
# Always reload options from config_entry to get latest saved state
|
|
||||||
# This ensures changes from previous steps are visible
|
|
||||||
self._options = self._migrate_config_options(self.config_entry.options)
|
|
||||||
|
|
||||||
# Show menu with all configuration categories
|
|
||||||
return self.async_show_menu(
|
|
||||||
step_id="init",
|
|
||||||
menu_options=[
|
|
||||||
"general_settings",
|
|
||||||
"display_settings",
|
|
||||||
"current_interval_price_rating",
|
|
||||||
"price_level",
|
|
||||||
"volatility",
|
|
||||||
"best_price",
|
|
||||||
"peak_price",
|
|
||||||
"price_trend",
|
|
||||||
"chart_data_export",
|
|
||||||
"reset_to_defaults",
|
|
||||||
"finish",
|
|
||||||
],
|
|
||||||
)
|
|
||||||
|
|
||||||
async def async_step_reset_to_defaults(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
|
||||||
"""Reset all settings to factory defaults."""
|
|
||||||
if user_input is not None:
|
if user_input is not None:
|
||||||
# Check if user confirmed the reset
|
|
||||||
if user_input.get("confirm_reset", False):
|
|
||||||
# Get currency from config_entry.data (this is immutable and safe)
|
|
||||||
currency_code = self.config_entry.data.get("currency", None)
|
|
||||||
|
|
||||||
# Completely replace options with fresh defaults (factory reset)
|
|
||||||
# This discards ALL old data including legacy structures
|
|
||||||
self._options = get_default_options(currency_code)
|
|
||||||
|
|
||||||
# Force save the new options
|
|
||||||
self._save_options_if_changed()
|
|
||||||
|
|
||||||
_LOGGER.info(
|
|
||||||
"Factory reset performed for config entry '%s' - all settings restored to defaults",
|
|
||||||
self.config_entry.title,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Show success message and return to menu
|
|
||||||
return self.async_abort(reason="reset_successful")
|
|
||||||
|
|
||||||
# User didn't check the box - they want to cancel
|
|
||||||
# Show info message (not error) and return to menu
|
|
||||||
return self.async_abort(reason="reset_cancelled")
|
|
||||||
|
|
||||||
# Show confirmation form with checkbox
|
|
||||||
return self.async_show_form(
|
|
||||||
step_id="reset_to_defaults",
|
|
||||||
data_schema=get_reset_to_defaults_schema(),
|
|
||||||
)
|
|
||||||
|
|
||||||
async def async_step_finish(self, _user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
|
||||||
"""Close the options flow."""
|
|
||||||
# Use empty reason to close without any message
|
|
||||||
return self.async_abort(reason="finished")
|
|
||||||
|
|
||||||
async def async_step_general_settings(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
|
||||||
"""Configure general settings."""
|
|
||||||
if user_input is not None:
|
|
||||||
# Update options with new values
|
|
||||||
self._options.update(user_input)
|
self._options.update(user_input)
|
||||||
# Save options only if changed (triggers listeners automatically)
|
return await self.async_step_current_interval_price_rating()
|
||||||
self._save_options_if_changed()
|
|
||||||
# Return to menu for more changes
|
|
||||||
return await self.async_step_init()
|
|
||||||
|
|
||||||
return self.async_show_form(
|
return self.async_show_form(
|
||||||
step_id="general_settings",
|
step_id="init",
|
||||||
data_schema=get_options_init_schema(self.config_entry.options),
|
data_schema=get_options_init_schema(self.config_entry.options),
|
||||||
description_placeholders={
|
description_placeholders={
|
||||||
|
**self._get_step_description_placeholders("init"),
|
||||||
"user_login": self.config_entry.data.get("user_login", "N/A"),
|
"user_login": self.config_entry.data.get("user_login", "N/A"),
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
async def async_step_display_settings(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
|
||||||
"""Configure currency display settings."""
|
|
||||||
# Get currency from coordinator data (if available)
|
|
||||||
# During options flow setup, integration might not be fully loaded yet
|
|
||||||
currency_code = None
|
|
||||||
if DOMAIN in self.hass.data and self.config_entry.entry_id in self.hass.data[DOMAIN]:
|
|
||||||
tibber_data = self.hass.data[DOMAIN][self.config_entry.entry_id]
|
|
||||||
if tibber_data.coordinator.data:
|
|
||||||
currency_code = tibber_data.coordinator.data.get("currency")
|
|
||||||
|
|
||||||
if user_input is not None:
|
|
||||||
# Update options with new values
|
|
||||||
self._options.update(user_input)
|
|
||||||
# async_create_entry automatically handles change detection and listener triggering
|
|
||||||
self._save_options_if_changed()
|
|
||||||
# Return to menu for more changes
|
|
||||||
return await self.async_step_init()
|
|
||||||
|
|
||||||
return self.async_show_form(
|
|
||||||
step_id="display_settings",
|
|
||||||
data_schema=get_display_settings_schema(self.config_entry.options, currency_code),
|
|
||||||
)
|
|
||||||
|
|
||||||
async def async_step_current_interval_price_rating(
|
async def async_step_current_interval_price_rating(
|
||||||
self, user_input: dict[str, Any] | None = None
|
self, user_input: dict[str, Any] | None = None
|
||||||
) -> ConfigFlowResult:
|
) -> ConfigFlowResult:
|
||||||
|
|
@ -516,9 +188,6 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
||||||
errors: dict[str, str] = {}
|
errors: dict[str, str] = {}
|
||||||
|
|
||||||
if user_input is not None:
|
if user_input is not None:
|
||||||
# Schema is now flattened - fields come directly in user_input
|
|
||||||
# But we still need to store them in nested structure for coordinator
|
|
||||||
|
|
||||||
# Validate low price rating threshold
|
# Validate low price rating threshold
|
||||||
if CONF_PRICE_RATING_THRESHOLD_LOW in user_input and not validate_price_rating_threshold_low(
|
if CONF_PRICE_RATING_THRESHOLD_LOW in user_input and not validate_price_rating_threshold_low(
|
||||||
user_input[CONF_PRICE_RATING_THRESHOLD_LOW]
|
user_input[CONF_PRICE_RATING_THRESHOLD_LOW]
|
||||||
|
|
@ -532,51 +201,26 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
||||||
errors[CONF_PRICE_RATING_THRESHOLD_HIGH] = "invalid_price_rating_high"
|
errors[CONF_PRICE_RATING_THRESHOLD_HIGH] = "invalid_price_rating_high"
|
||||||
|
|
||||||
# Cross-validate both thresholds together (LOW must be < HIGH)
|
# Cross-validate both thresholds together (LOW must be < HIGH)
|
||||||
if not errors:
|
if not errors and not validate_price_rating_thresholds(
|
||||||
# Get current values directly from options (now flat)
|
user_input.get(
|
||||||
low_val = user_input.get(
|
|
||||||
CONF_PRICE_RATING_THRESHOLD_LOW, self._options.get(CONF_PRICE_RATING_THRESHOLD_LOW, -10)
|
CONF_PRICE_RATING_THRESHOLD_LOW, self._options.get(CONF_PRICE_RATING_THRESHOLD_LOW, -10)
|
||||||
)
|
),
|
||||||
high_val = user_input.get(
|
user_input.get(
|
||||||
CONF_PRICE_RATING_THRESHOLD_HIGH, self._options.get(CONF_PRICE_RATING_THRESHOLD_HIGH, 10)
|
CONF_PRICE_RATING_THRESHOLD_HIGH, self._options.get(CONF_PRICE_RATING_THRESHOLD_HIGH, 10)
|
||||||
)
|
),
|
||||||
if not validate_price_rating_thresholds(low_val, high_val):
|
):
|
||||||
# This should never happen given the range constraints, but add error for safety
|
# This should never happen given the range constraints, but add error for safety
|
||||||
errors["base"] = "invalid_price_rating_thresholds"
|
errors["base"] = "invalid_price_rating_thresholds"
|
||||||
|
|
||||||
if not errors:
|
if not errors:
|
||||||
# Store flat data directly in options (no section wrapping)
|
|
||||||
self._options.update(user_input)
|
self._options.update(user_input)
|
||||||
# async_create_entry automatically handles change detection and listener triggering
|
return await self.async_step_volatility()
|
||||||
self._save_options_if_changed()
|
|
||||||
# Return to menu for more changes
|
|
||||||
return await self.async_step_init()
|
|
||||||
|
|
||||||
return self.async_show_form(
|
return self.async_show_form(
|
||||||
step_id="current_interval_price_rating",
|
step_id="current_interval_price_rating",
|
||||||
data_schema=get_price_rating_schema(self.config_entry.options),
|
data_schema=get_price_rating_schema(self.config_entry.options),
|
||||||
|
description_placeholders=self._get_step_description_placeholders("current_interval_price_rating"),
|
||||||
errors=errors,
|
errors=errors,
|
||||||
description_placeholders=self._get_entity_warning_placeholders("current_interval_price_rating"),
|
|
||||||
)
|
|
||||||
|
|
||||||
async def async_step_price_level(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
|
||||||
"""Configure Tibber price level gap tolerance (smoothing for API 'level' field)."""
|
|
||||||
errors: dict[str, str] = {}
|
|
||||||
|
|
||||||
if user_input is not None:
|
|
||||||
# No validation needed - slider constraints ensure valid range
|
|
||||||
# Store flat data directly in options
|
|
||||||
self._options.update(user_input)
|
|
||||||
# async_create_entry automatically handles change detection and listener triggering
|
|
||||||
self._save_options_if_changed()
|
|
||||||
# Return to menu for more changes
|
|
||||||
return await self.async_step_init()
|
|
||||||
|
|
||||||
return self.async_show_form(
|
|
||||||
step_id="price_level",
|
|
||||||
data_schema=get_price_level_schema(self.config_entry.options),
|
|
||||||
errors=errors,
|
|
||||||
description_placeholders=self._get_entity_warning_placeholders("price_level"),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
async def async_step_best_price(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
async def async_step_best_price(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
||||||
|
|
@ -584,74 +228,47 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
||||||
errors: dict[str, str] = {}
|
errors: dict[str, str] = {}
|
||||||
|
|
||||||
if user_input is not None:
|
if user_input is not None:
|
||||||
# Extract settings from sections
|
|
||||||
period_settings = user_input.get("period_settings", {})
|
|
||||||
flexibility_settings = user_input.get("flexibility_settings", {})
|
|
||||||
relaxation_settings = user_input.get("relaxation_and_target_periods", {})
|
|
||||||
|
|
||||||
# Validate period length
|
# Validate period length
|
||||||
if CONF_BEST_PRICE_MIN_PERIOD_LENGTH in period_settings and not validate_period_length(
|
if CONF_BEST_PRICE_MIN_PERIOD_LENGTH in user_input and not validate_period_length(
|
||||||
period_settings[CONF_BEST_PRICE_MIN_PERIOD_LENGTH]
|
user_input[CONF_BEST_PRICE_MIN_PERIOD_LENGTH]
|
||||||
):
|
):
|
||||||
errors[CONF_BEST_PRICE_MIN_PERIOD_LENGTH] = "invalid_period_length"
|
errors[CONF_BEST_PRICE_MIN_PERIOD_LENGTH] = "invalid_period_length"
|
||||||
|
|
||||||
# Validate flex percentage
|
# Validate flex percentage
|
||||||
if CONF_BEST_PRICE_FLEX in flexibility_settings and not validate_flex_percentage(
|
if CONF_BEST_PRICE_FLEX in user_input and not validate_flex_percentage(user_input[CONF_BEST_PRICE_FLEX]):
|
||||||
flexibility_settings[CONF_BEST_PRICE_FLEX]
|
|
||||||
):
|
|
||||||
errors[CONF_BEST_PRICE_FLEX] = "invalid_flex"
|
errors[CONF_BEST_PRICE_FLEX] = "invalid_flex"
|
||||||
|
|
||||||
# Validate distance from average (Best Price uses negative values)
|
# Validate distance from average (Best Price uses negative values)
|
||||||
if (
|
if CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG in user_input and not validate_best_price_distance_percentage(
|
||||||
CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG in flexibility_settings
|
user_input[CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG]
|
||||||
and not validate_best_price_distance_percentage(
|
|
||||||
flexibility_settings[CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG]
|
|
||||||
)
|
|
||||||
):
|
):
|
||||||
errors[CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG] = "invalid_best_price_distance"
|
errors[CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG] = "invalid_best_price_distance"
|
||||||
|
|
||||||
# Validate minimum periods count
|
# Validate minimum periods count
|
||||||
if CONF_MIN_PERIODS_BEST in relaxation_settings and not validate_min_periods(
|
if CONF_MIN_PERIODS_BEST in user_input and not validate_min_periods(user_input[CONF_MIN_PERIODS_BEST]):
|
||||||
relaxation_settings[CONF_MIN_PERIODS_BEST]
|
|
||||||
):
|
|
||||||
errors[CONF_MIN_PERIODS_BEST] = "invalid_min_periods"
|
errors[CONF_MIN_PERIODS_BEST] = "invalid_min_periods"
|
||||||
|
|
||||||
# Validate gap count
|
# Validate gap count
|
||||||
if CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT in period_settings and not validate_gap_count(
|
if CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT in user_input and not validate_gap_count(
|
||||||
period_settings[CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT]
|
user_input[CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT]
|
||||||
):
|
):
|
||||||
errors[CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT] = "invalid_gap_count"
|
errors[CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT] = "invalid_gap_count"
|
||||||
|
|
||||||
# Validate relaxation attempts
|
# Validate relaxation attempts
|
||||||
if CONF_RELAXATION_ATTEMPTS_BEST in relaxation_settings and not validate_relaxation_attempts(
|
if CONF_RELAXATION_ATTEMPTS_BEST in user_input and not validate_relaxation_attempts(
|
||||||
relaxation_settings[CONF_RELAXATION_ATTEMPTS_BEST]
|
user_input[CONF_RELAXATION_ATTEMPTS_BEST]
|
||||||
):
|
):
|
||||||
errors[CONF_RELAXATION_ATTEMPTS_BEST] = "invalid_relaxation_attempts"
|
errors[CONF_RELAXATION_ATTEMPTS_BEST] = "invalid_relaxation_attempts"
|
||||||
|
|
||||||
if not errors:
|
if not errors:
|
||||||
# Merge section data into options
|
self._options.update(user_input)
|
||||||
self._merge_section_data(user_input)
|
return await self.async_step_peak_price()
|
||||||
# async_create_entry automatically handles change detection and listener triggering
|
|
||||||
self._save_options_if_changed()
|
|
||||||
# Return to menu for more changes
|
|
||||||
return await self.async_step_init()
|
|
||||||
|
|
||||||
overrides = self._get_active_overrides()
|
|
||||||
placeholders = self._get_entity_warning_placeholders("best_price")
|
|
||||||
placeholders.update(self._get_override_warning_placeholder("best_price", overrides))
|
|
||||||
|
|
||||||
# Load translations for override warnings
|
|
||||||
override_translations = await self._get_override_translations()
|
|
||||||
|
|
||||||
return self.async_show_form(
|
return self.async_show_form(
|
||||||
step_id="best_price",
|
step_id="best_price",
|
||||||
data_schema=get_best_price_schema(
|
data_schema=get_best_price_schema(self.config_entry.options),
|
||||||
self.config_entry.options,
|
description_placeholders=self._get_step_description_placeholders("best_price"),
|
||||||
overrides=overrides,
|
|
||||||
translations=override_translations,
|
|
||||||
),
|
|
||||||
errors=errors,
|
errors=errors,
|
||||||
description_placeholders=placeholders,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
async def async_step_peak_price(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
async def async_step_peak_price(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
||||||
|
|
@ -659,71 +276,47 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
||||||
errors: dict[str, str] = {}
|
errors: dict[str, str] = {}
|
||||||
|
|
||||||
if user_input is not None:
|
if user_input is not None:
|
||||||
# Extract settings from sections
|
|
||||||
period_settings = user_input.get("period_settings", {})
|
|
||||||
flexibility_settings = user_input.get("flexibility_settings", {})
|
|
||||||
relaxation_settings = user_input.get("relaxation_and_target_periods", {})
|
|
||||||
|
|
||||||
# Validate period length
|
# Validate period length
|
||||||
if CONF_PEAK_PRICE_MIN_PERIOD_LENGTH in period_settings and not validate_period_length(
|
if CONF_PEAK_PRICE_MIN_PERIOD_LENGTH in user_input and not validate_period_length(
|
||||||
period_settings[CONF_PEAK_PRICE_MIN_PERIOD_LENGTH]
|
user_input[CONF_PEAK_PRICE_MIN_PERIOD_LENGTH]
|
||||||
):
|
):
|
||||||
errors[CONF_PEAK_PRICE_MIN_PERIOD_LENGTH] = "invalid_period_length"
|
errors[CONF_PEAK_PRICE_MIN_PERIOD_LENGTH] = "invalid_period_length"
|
||||||
|
|
||||||
# Validate flex percentage (peak uses negative values)
|
# Validate flex percentage (peak uses negative values)
|
||||||
if CONF_PEAK_PRICE_FLEX in flexibility_settings and not validate_flex_percentage(
|
if CONF_PEAK_PRICE_FLEX in user_input and not validate_flex_percentage(user_input[CONF_PEAK_PRICE_FLEX]):
|
||||||
flexibility_settings[CONF_PEAK_PRICE_FLEX]
|
|
||||||
):
|
|
||||||
errors[CONF_PEAK_PRICE_FLEX] = "invalid_flex"
|
errors[CONF_PEAK_PRICE_FLEX] = "invalid_flex"
|
||||||
|
|
||||||
# Validate distance from average (Peak Price uses positive values)
|
# Validate distance from average (Peak Price uses positive values)
|
||||||
if CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG in flexibility_settings and not validate_distance_percentage(
|
if CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG in user_input and not validate_distance_percentage(
|
||||||
flexibility_settings[CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG]
|
user_input[CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG]
|
||||||
):
|
):
|
||||||
errors[CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG] = "invalid_peak_price_distance"
|
errors[CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG] = "invalid_peak_price_distance"
|
||||||
|
|
||||||
# Validate minimum periods count
|
# Validate minimum periods count
|
||||||
if CONF_MIN_PERIODS_PEAK in relaxation_settings and not validate_min_periods(
|
if CONF_MIN_PERIODS_PEAK in user_input and not validate_min_periods(user_input[CONF_MIN_PERIODS_PEAK]):
|
||||||
relaxation_settings[CONF_MIN_PERIODS_PEAK]
|
|
||||||
):
|
|
||||||
errors[CONF_MIN_PERIODS_PEAK] = "invalid_min_periods"
|
errors[CONF_MIN_PERIODS_PEAK] = "invalid_min_periods"
|
||||||
|
|
||||||
# Validate gap count
|
# Validate gap count
|
||||||
if CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT in period_settings and not validate_gap_count(
|
if CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT in user_input and not validate_gap_count(
|
||||||
period_settings[CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT]
|
user_input[CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT]
|
||||||
):
|
):
|
||||||
errors[CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT] = "invalid_gap_count"
|
errors[CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT] = "invalid_gap_count"
|
||||||
|
|
||||||
# Validate relaxation attempts
|
# Validate relaxation attempts
|
||||||
if CONF_RELAXATION_ATTEMPTS_PEAK in relaxation_settings and not validate_relaxation_attempts(
|
if CONF_RELAXATION_ATTEMPTS_PEAK in user_input and not validate_relaxation_attempts(
|
||||||
relaxation_settings[CONF_RELAXATION_ATTEMPTS_PEAK]
|
user_input[CONF_RELAXATION_ATTEMPTS_PEAK]
|
||||||
):
|
):
|
||||||
errors[CONF_RELAXATION_ATTEMPTS_PEAK] = "invalid_relaxation_attempts"
|
errors[CONF_RELAXATION_ATTEMPTS_PEAK] = "invalid_relaxation_attempts"
|
||||||
|
|
||||||
if not errors:
|
if not errors:
|
||||||
# Merge section data into options
|
self._options.update(user_input)
|
||||||
self._merge_section_data(user_input)
|
return await self.async_step_price_trend()
|
||||||
# async_create_entry automatically handles change detection and listener triggering
|
|
||||||
self._save_options_if_changed()
|
|
||||||
# Return to menu for more changes
|
|
||||||
return await self.async_step_init()
|
|
||||||
|
|
||||||
overrides = self._get_active_overrides()
|
|
||||||
placeholders = self._get_entity_warning_placeholders("peak_price")
|
|
||||||
placeholders.update(self._get_override_warning_placeholder("peak_price", overrides))
|
|
||||||
|
|
||||||
# Load translations for override warnings
|
|
||||||
override_translations = await self._get_override_translations()
|
|
||||||
|
|
||||||
return self.async_show_form(
|
return self.async_show_form(
|
||||||
step_id="peak_price",
|
step_id="peak_price",
|
||||||
data_schema=get_peak_price_schema(
|
data_schema=get_peak_price_schema(self.config_entry.options),
|
||||||
self.config_entry.options,
|
description_placeholders=self._get_step_description_placeholders("peak_price"),
|
||||||
overrides=overrides,
|
|
||||||
translations=override_translations,
|
|
||||||
),
|
|
||||||
errors=errors,
|
errors=errors,
|
||||||
description_placeholders=placeholders,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
async def async_step_price_trend(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
async def async_step_price_trend(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
||||||
|
|
@ -731,9 +324,6 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
||||||
errors: dict[str, str] = {}
|
errors: dict[str, str] = {}
|
||||||
|
|
||||||
if user_input is not None:
|
if user_input is not None:
|
||||||
# Schema is now flattened - fields come directly in user_input
|
|
||||||
# Store them flat in options (no nested structure)
|
|
||||||
|
|
||||||
# Validate rising trend threshold
|
# Validate rising trend threshold
|
||||||
if CONF_PRICE_TREND_THRESHOLD_RISING in user_input and not validate_price_trend_rising(
|
if CONF_PRICE_TREND_THRESHOLD_RISING in user_input and not validate_price_trend_rising(
|
||||||
user_input[CONF_PRICE_TREND_THRESHOLD_RISING]
|
user_input[CONF_PRICE_TREND_THRESHOLD_RISING]
|
||||||
|
|
@ -746,93 +336,28 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
||||||
):
|
):
|
||||||
errors[CONF_PRICE_TREND_THRESHOLD_FALLING] = "invalid_price_trend_falling"
|
errors[CONF_PRICE_TREND_THRESHOLD_FALLING] = "invalid_price_trend_falling"
|
||||||
|
|
||||||
# Validate strongly rising trend threshold
|
|
||||||
if CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING in user_input and not validate_price_trend_strongly_rising(
|
|
||||||
user_input[CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING]
|
|
||||||
):
|
|
||||||
errors[CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING] = "invalid_price_trend_strongly_rising"
|
|
||||||
|
|
||||||
# Validate strongly falling trend threshold
|
|
||||||
if CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING in user_input and not validate_price_trend_strongly_falling(
|
|
||||||
user_input[CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING]
|
|
||||||
):
|
|
||||||
errors[CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING] = "invalid_price_trend_strongly_falling"
|
|
||||||
|
|
||||||
# Cross-validation: Ensure rising < strongly_rising and falling > strongly_falling
|
|
||||||
if not errors:
|
if not errors:
|
||||||
rising = user_input.get(CONF_PRICE_TREND_THRESHOLD_RISING)
|
|
||||||
strongly_rising = user_input.get(CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING)
|
|
||||||
falling = user_input.get(CONF_PRICE_TREND_THRESHOLD_FALLING)
|
|
||||||
strongly_falling = user_input.get(CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING)
|
|
||||||
|
|
||||||
if rising is not None and strongly_rising is not None and rising >= strongly_rising:
|
|
||||||
errors[CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING] = (
|
|
||||||
"invalid_trend_strongly_rising_less_than_rising"
|
|
||||||
)
|
|
||||||
if falling is not None and strongly_falling is not None and falling <= strongly_falling:
|
|
||||||
errors[CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING] = (
|
|
||||||
"invalid_trend_strongly_falling_greater_than_falling"
|
|
||||||
)
|
|
||||||
|
|
||||||
if not errors:
|
|
||||||
# Store flat data directly in options (no section wrapping)
|
|
||||||
self._options.update(user_input)
|
self._options.update(user_input)
|
||||||
# async_create_entry automatically handles change detection and listener triggering
|
return await self.async_step_chart_data_export()
|
||||||
self._save_options_if_changed()
|
|
||||||
# Return to menu for more changes
|
|
||||||
return await self.async_step_init()
|
|
||||||
|
|
||||||
return self.async_show_form(
|
return self.async_show_form(
|
||||||
step_id="price_trend",
|
step_id="price_trend",
|
||||||
data_schema=get_price_trend_schema(self.config_entry.options),
|
data_schema=get_price_trend_schema(self.config_entry.options),
|
||||||
|
description_placeholders=self._get_step_description_placeholders("price_trend"),
|
||||||
errors=errors,
|
errors=errors,
|
||||||
description_placeholders=self._get_entity_warning_placeholders("price_trend"),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
async def async_step_chart_data_export(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
async def async_step_chart_data_export(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
||||||
"""Info page for chart data export sensor."""
|
"""Info page for chart data export sensor."""
|
||||||
if user_input is not None:
|
if user_input is not None:
|
||||||
# No changes to save - just return to menu
|
# No validation needed - just an info page
|
||||||
return await self.async_step_init()
|
return self.async_create_entry(title="", data=self._options)
|
||||||
|
|
||||||
# Check if the chart data export sensor is enabled
|
# Show info-only form (no input fields)
|
||||||
is_enabled = check_chart_data_export_enabled(self.hass, self.config_entry)
|
|
||||||
|
|
||||||
# Show info-only form with status-dependent description
|
|
||||||
return self.async_show_form(
|
return self.async_show_form(
|
||||||
step_id="chart_data_export",
|
step_id="chart_data_export",
|
||||||
data_schema=get_chart_data_export_schema(self.config_entry.options),
|
data_schema=get_chart_data_export_schema(self.config_entry.options),
|
||||||
description_placeholders={
|
description_placeholders=self._get_step_description_placeholders("chart_data_export"),
|
||||||
"sensor_status_info": self._get_chart_export_status_info(is_enabled=is_enabled),
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
def _get_chart_export_status_info(self, *, is_enabled: bool) -> str:
|
|
||||||
"""Get the status info block for chart data export sensor."""
|
|
||||||
if is_enabled:
|
|
||||||
return (
|
|
||||||
"✅ **Status: Sensor is enabled**\n\n"
|
|
||||||
"The Chart Data Export sensor is currently active and providing data as attributes.\n\n"
|
|
||||||
"**Configuration (optional):**\n\n"
|
|
||||||
"Default settings work out-of-the-box (today+tomorrow, 15-minute intervals, prices only).\n\n"
|
|
||||||
"For customization, add to **`configuration.yaml`**:\n\n"
|
|
||||||
"```yaml\n"
|
|
||||||
"tibber_prices:\n"
|
|
||||||
" chart_export:\n"
|
|
||||||
" day:\n"
|
|
||||||
" - today\n"
|
|
||||||
" - tomorrow\n"
|
|
||||||
" include_level: true\n"
|
|
||||||
" include_rating_level: true\n"
|
|
||||||
"```\n\n"
|
|
||||||
"**All parameters:** See `tibber_prices.get_chartdata` service documentation"
|
|
||||||
)
|
|
||||||
return (
|
|
||||||
"❌ **Status: Sensor is disabled**\n\n"
|
|
||||||
"**Enable the sensor:**\n\n"
|
|
||||||
"1. Open **Settings → Devices & Services → Tibber Prices**\n"
|
|
||||||
"2. Select your home → Find **'Chart Data Export'** (Diagnostic section)\n"
|
|
||||||
"3. **Enable the sensor** (disabled by default)"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
async def async_step_volatility(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
async def async_step_volatility(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
||||||
|
|
@ -840,8 +365,6 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
||||||
errors: dict[str, str] = {}
|
errors: dict[str, str] = {}
|
||||||
|
|
||||||
if user_input is not None:
|
if user_input is not None:
|
||||||
# Schema is now flattened - fields come directly in user_input
|
|
||||||
|
|
||||||
# Validate moderate volatility threshold
|
# Validate moderate volatility threshold
|
||||||
if CONF_VOLATILITY_THRESHOLD_MODERATE in user_input and not validate_volatility_threshold_moderate(
|
if CONF_VOLATILITY_THRESHOLD_MODERATE in user_input and not validate_volatility_threshold_moderate(
|
||||||
user_input[CONF_VOLATILITY_THRESHOLD_MODERATE]
|
user_input[CONF_VOLATILITY_THRESHOLD_MODERATE]
|
||||||
|
|
@ -862,34 +385,30 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
||||||
|
|
||||||
# Cross-validation: Ensure MODERATE < HIGH < VERY_HIGH
|
# Cross-validation: Ensure MODERATE < HIGH < VERY_HIGH
|
||||||
if not errors:
|
if not errors:
|
||||||
# Get current values directly from options (now flat)
|
existing_options = self.config_entry.options
|
||||||
moderate = user_input.get(
|
moderate = user_input.get(
|
||||||
CONF_VOLATILITY_THRESHOLD_MODERATE,
|
CONF_VOLATILITY_THRESHOLD_MODERATE,
|
||||||
self._options.get(CONF_VOLATILITY_THRESHOLD_MODERATE, DEFAULT_VOLATILITY_THRESHOLD_MODERATE),
|
existing_options.get(CONF_VOLATILITY_THRESHOLD_MODERATE, DEFAULT_VOLATILITY_THRESHOLD_MODERATE),
|
||||||
)
|
)
|
||||||
high = user_input.get(
|
high = user_input.get(
|
||||||
CONF_VOLATILITY_THRESHOLD_HIGH,
|
CONF_VOLATILITY_THRESHOLD_HIGH,
|
||||||
self._options.get(CONF_VOLATILITY_THRESHOLD_HIGH, DEFAULT_VOLATILITY_THRESHOLD_HIGH),
|
existing_options.get(CONF_VOLATILITY_THRESHOLD_HIGH, DEFAULT_VOLATILITY_THRESHOLD_HIGH),
|
||||||
)
|
)
|
||||||
very_high = user_input.get(
|
very_high = user_input.get(
|
||||||
CONF_VOLATILITY_THRESHOLD_VERY_HIGH,
|
CONF_VOLATILITY_THRESHOLD_VERY_HIGH,
|
||||||
self._options.get(CONF_VOLATILITY_THRESHOLD_VERY_HIGH, DEFAULT_VOLATILITY_THRESHOLD_VERY_HIGH),
|
existing_options.get(CONF_VOLATILITY_THRESHOLD_VERY_HIGH, DEFAULT_VOLATILITY_THRESHOLD_VERY_HIGH),
|
||||||
)
|
)
|
||||||
|
|
||||||
if not validate_volatility_thresholds(moderate, high, very_high):
|
if not validate_volatility_thresholds(moderate, high, very_high):
|
||||||
errors["base"] = "invalid_volatility_thresholds"
|
errors["base"] = "invalid_volatility_thresholds"
|
||||||
|
|
||||||
if not errors:
|
if not errors:
|
||||||
# Store flat data directly in options (no section wrapping)
|
|
||||||
self._options.update(user_input)
|
self._options.update(user_input)
|
||||||
# async_create_entry automatically handles change detection and listener triggering
|
return await self.async_step_best_price()
|
||||||
self._save_options_if_changed()
|
|
||||||
# Return to menu for more changes
|
|
||||||
return await self.async_step_init()
|
|
||||||
|
|
||||||
return self.async_show_form(
|
return self.async_show_form(
|
||||||
step_id="volatility",
|
step_id="volatility",
|
||||||
data_schema=get_volatility_schema(self.config_entry.options),
|
data_schema=get_volatility_schema(self.config_entry.options),
|
||||||
|
description_placeholders=self._get_step_description_placeholders("volatility"),
|
||||||
errors=errors,
|
errors=errors,
|
||||||
description_placeholders=self._get_entity_warning_placeholders("volatility"),
|
|
||||||
)
|
)
|
||||||
|
|
|
||||||
File diff suppressed because it is too large
Load diff
|
|
@ -125,9 +125,6 @@ class TibberPricesSubentryFlowHandler(ConfigSubentryFlow):
|
||||||
offset_desc = self._format_offset_description(offset_days, offset_hours, offset_minutes)
|
offset_desc = self._format_offset_description(offset_days, offset_hours, offset_minutes)
|
||||||
subentry_title = f"{parent_entry.title} ({offset_desc})"
|
subentry_title = f"{parent_entry.title} ({offset_desc})"
|
||||||
|
|
||||||
# Note: Subentries inherit options from parent entry automatically
|
|
||||||
# Options parameter is not supported by ConfigSubentryFlow.async_create_entry()
|
|
||||||
|
|
||||||
return self.async_create_entry(
|
return self.async_create_entry(
|
||||||
title=subentry_title,
|
title=subentry_title,
|
||||||
data={
|
data={
|
||||||
|
|
|
||||||
|
|
@ -20,12 +20,7 @@ from custom_components.tibber_prices.config_flow_handlers.validators import (
|
||||||
TibberPricesInvalidAuthError,
|
TibberPricesInvalidAuthError,
|
||||||
validate_api_token,
|
validate_api_token,
|
||||||
)
|
)
|
||||||
from custom_components.tibber_prices.const import (
|
from custom_components.tibber_prices.const import DOMAIN, LOGGER, get_translation
|
||||||
DOMAIN,
|
|
||||||
LOGGER,
|
|
||||||
get_default_options,
|
|
||||||
get_translation,
|
|
||||||
)
|
|
||||||
from homeassistant.config_entries import (
|
from homeassistant.config_entries import (
|
||||||
ConfigEntry,
|
ConfigEntry,
|
||||||
ConfigFlow,
|
ConfigFlow,
|
||||||
|
|
@ -141,7 +136,6 @@ class TibberPricesConfigFlowHandler(ConfigFlow, domain=DOMAIN):
|
||||||
step_id="reauth_confirm",
|
step_id="reauth_confirm",
|
||||||
data_schema=get_reauth_confirm_schema(),
|
data_schema=get_reauth_confirm_schema(),
|
||||||
errors=_errors,
|
errors=_errors,
|
||||||
description_placeholders={"tibber_url": "https://developer.tibber.com"},
|
|
||||||
)
|
)
|
||||||
|
|
||||||
async def async_step_user(
|
async def async_step_user(
|
||||||
|
|
@ -292,7 +286,6 @@ class TibberPricesConfigFlowHandler(ConfigFlow, domain=DOMAIN):
|
||||||
step_id="new_token",
|
step_id="new_token",
|
||||||
data_schema=get_user_schema((user_input or {}).get(CONF_ACCESS_TOKEN)),
|
data_schema=get_user_schema((user_input or {}).get(CONF_ACCESS_TOKEN)),
|
||||||
errors=_errors,
|
errors=_errors,
|
||||||
description_placeholders={"tibber_url": "https://developer.tibber.com"},
|
|
||||||
)
|
)
|
||||||
|
|
||||||
async def async_step_select_home(self, user_input: dict | None = None) -> ConfigFlowResult: # noqa: PLR0911
|
async def async_step_select_home(self, user_input: dict | None = None) -> ConfigFlowResult: # noqa: PLR0911
|
||||||
|
|
@ -386,16 +379,6 @@ class TibberPricesConfigFlowHandler(ConfigFlow, domain=DOMAIN):
|
||||||
"user_login": self._user_login or "N/A",
|
"user_login": self._user_login or "N/A",
|
||||||
}
|
}
|
||||||
|
|
||||||
# Extract currency from home data for intelligent defaults
|
|
||||||
currency_code = None
|
|
||||||
if (
|
|
||||||
selected_home
|
|
||||||
and (subscription := selected_home.get("currentSubscription"))
|
|
||||||
and (price_info := subscription.get("priceInfo"))
|
|
||||||
and (current_price := price_info.get("current"))
|
|
||||||
):
|
|
||||||
currency_code = current_price.get("currency")
|
|
||||||
|
|
||||||
# Generate entry title from home address (not appNickname)
|
# Generate entry title from home address (not appNickname)
|
||||||
entry_title = self._get_entry_title(selected_home)
|
entry_title = self._get_entry_title(selected_home)
|
||||||
|
|
||||||
|
|
@ -403,7 +386,6 @@ class TibberPricesConfigFlowHandler(ConfigFlow, domain=DOMAIN):
|
||||||
title=entry_title,
|
title=entry_title,
|
||||||
data=data,
|
data=data,
|
||||||
description=f"{self._user_login} ({self._user_id})",
|
description=f"{self._user_login} ({self._user_id})",
|
||||||
options=get_default_options(currency_code),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
home_options = [
|
home_options = [
|
||||||
|
|
|
||||||
|
|
@ -20,8 +20,6 @@ from custom_components.tibber_prices.const import (
|
||||||
MAX_PRICE_RATING_THRESHOLD_LOW,
|
MAX_PRICE_RATING_THRESHOLD_LOW,
|
||||||
MAX_PRICE_TREND_FALLING,
|
MAX_PRICE_TREND_FALLING,
|
||||||
MAX_PRICE_TREND_RISING,
|
MAX_PRICE_TREND_RISING,
|
||||||
MAX_PRICE_TREND_STRONGLY_FALLING,
|
|
||||||
MAX_PRICE_TREND_STRONGLY_RISING,
|
|
||||||
MAX_RELAXATION_ATTEMPTS,
|
MAX_RELAXATION_ATTEMPTS,
|
||||||
MAX_VOLATILITY_THRESHOLD_HIGH,
|
MAX_VOLATILITY_THRESHOLD_HIGH,
|
||||||
MAX_VOLATILITY_THRESHOLD_MODERATE,
|
MAX_VOLATILITY_THRESHOLD_MODERATE,
|
||||||
|
|
@ -32,8 +30,6 @@ from custom_components.tibber_prices.const import (
|
||||||
MIN_PRICE_RATING_THRESHOLD_LOW,
|
MIN_PRICE_RATING_THRESHOLD_LOW,
|
||||||
MIN_PRICE_TREND_FALLING,
|
MIN_PRICE_TREND_FALLING,
|
||||||
MIN_PRICE_TREND_RISING,
|
MIN_PRICE_TREND_RISING,
|
||||||
MIN_PRICE_TREND_STRONGLY_FALLING,
|
|
||||||
MIN_PRICE_TREND_STRONGLY_RISING,
|
|
||||||
MIN_RELAXATION_ATTEMPTS,
|
MIN_RELAXATION_ATTEMPTS,
|
||||||
MIN_VOLATILITY_THRESHOLD_HIGH,
|
MIN_VOLATILITY_THRESHOLD_HIGH,
|
||||||
MIN_VOLATILITY_THRESHOLD_MODERATE,
|
MIN_VOLATILITY_THRESHOLD_MODERATE,
|
||||||
|
|
@ -341,31 +337,3 @@ def validate_price_trend_falling(threshold: int) -> bool:
|
||||||
|
|
||||||
"""
|
"""
|
||||||
return MIN_PRICE_TREND_FALLING <= threshold <= MAX_PRICE_TREND_FALLING
|
return MIN_PRICE_TREND_FALLING <= threshold <= MAX_PRICE_TREND_FALLING
|
||||||
|
|
||||||
|
|
||||||
def validate_price_trend_strongly_rising(threshold: int) -> bool:
|
|
||||||
"""
|
|
||||||
Validate strongly rising price trend threshold.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
threshold: Strongly rising trend threshold percentage (2 to 100)
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
True if threshold is valid (MIN_PRICE_TREND_STRONGLY_RISING to MAX_PRICE_TREND_STRONGLY_RISING)
|
|
||||||
|
|
||||||
"""
|
|
||||||
return MIN_PRICE_TREND_STRONGLY_RISING <= threshold <= MAX_PRICE_TREND_STRONGLY_RISING
|
|
||||||
|
|
||||||
|
|
||||||
def validate_price_trend_strongly_falling(threshold: int) -> bool:
|
|
||||||
"""
|
|
||||||
Validate strongly falling price trend threshold.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
threshold: Strongly falling trend threshold percentage (-100 to -2)
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
True if threshold is valid (MIN_PRICE_TREND_STRONGLY_FALLING to MAX_PRICE_TREND_STRONGLY_FALLING)
|
|
||||||
|
|
||||||
"""
|
|
||||||
return MIN_PRICE_TREND_STRONGLY_FALLING <= threshold <= MAX_PRICE_TREND_STRONGLY_FALLING
|
|
||||||
|
|
|
||||||
|
|
@ -1,11 +1,10 @@
|
||||||
"""Constants for the Tibber Price Analytics integration."""
|
"""Constants for the Tibber Price Analytics integration."""
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
import json
|
import json
|
||||||
import logging
|
import logging
|
||||||
|
from collections.abc import Sequence
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import TYPE_CHECKING, Any
|
from typing import Any
|
||||||
|
|
||||||
import aiofiles
|
import aiofiles
|
||||||
|
|
||||||
|
|
@ -15,19 +14,13 @@ from homeassistant.const import (
|
||||||
UnitOfPower,
|
UnitOfPower,
|
||||||
UnitOfTime,
|
UnitOfTime,
|
||||||
)
|
)
|
||||||
|
from homeassistant.core import HomeAssistant
|
||||||
if TYPE_CHECKING:
|
|
||||||
from collections.abc import Sequence
|
|
||||||
|
|
||||||
from homeassistant.config_entries import ConfigEntry
|
|
||||||
from homeassistant.core import HomeAssistant
|
|
||||||
|
|
||||||
DOMAIN = "tibber_prices"
|
DOMAIN = "tibber_prices"
|
||||||
LOGGER = logging.getLogger(__package__)
|
LOGGER = logging.getLogger(__package__)
|
||||||
|
|
||||||
# Data storage keys
|
# Data storage keys
|
||||||
DATA_CHART_CONFIG = "chart_config" # Key for chart export config in hass.data
|
DATA_CHART_CONFIG = "chart_config" # Key for chart export config in hass.data
|
||||||
DATA_CHART_METADATA_CONFIG = "chart_metadata_config" # Key for chart metadata config in hass.data
|
|
||||||
|
|
||||||
# Configuration keys
|
# Configuration keys
|
||||||
CONF_EXTENDED_DESCRIPTIONS = "extended_descriptions"
|
CONF_EXTENDED_DESCRIPTIONS = "extended_descriptions"
|
||||||
|
|
@ -44,14 +37,8 @@ CONF_BEST_PRICE_MIN_PERIOD_LENGTH = "best_price_min_period_length"
|
||||||
CONF_PEAK_PRICE_MIN_PERIOD_LENGTH = "peak_price_min_period_length"
|
CONF_PEAK_PRICE_MIN_PERIOD_LENGTH = "peak_price_min_period_length"
|
||||||
CONF_PRICE_RATING_THRESHOLD_LOW = "price_rating_threshold_low"
|
CONF_PRICE_RATING_THRESHOLD_LOW = "price_rating_threshold_low"
|
||||||
CONF_PRICE_RATING_THRESHOLD_HIGH = "price_rating_threshold_high"
|
CONF_PRICE_RATING_THRESHOLD_HIGH = "price_rating_threshold_high"
|
||||||
CONF_PRICE_RATING_HYSTERESIS = "price_rating_hysteresis"
|
|
||||||
CONF_PRICE_RATING_GAP_TOLERANCE = "price_rating_gap_tolerance"
|
|
||||||
CONF_PRICE_LEVEL_GAP_TOLERANCE = "price_level_gap_tolerance"
|
|
||||||
CONF_AVERAGE_SENSOR_DISPLAY = "average_sensor_display" # "median" or "mean"
|
|
||||||
CONF_PRICE_TREND_THRESHOLD_RISING = "price_trend_threshold_rising"
|
CONF_PRICE_TREND_THRESHOLD_RISING = "price_trend_threshold_rising"
|
||||||
CONF_PRICE_TREND_THRESHOLD_FALLING = "price_trend_threshold_falling"
|
CONF_PRICE_TREND_THRESHOLD_FALLING = "price_trend_threshold_falling"
|
||||||
CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING = "price_trend_threshold_strongly_rising"
|
|
||||||
CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING = "price_trend_threshold_strongly_falling"
|
|
||||||
CONF_VOLATILITY_THRESHOLD_MODERATE = "volatility_threshold_moderate"
|
CONF_VOLATILITY_THRESHOLD_MODERATE = "volatility_threshold_moderate"
|
||||||
CONF_VOLATILITY_THRESHOLD_HIGH = "volatility_threshold_high"
|
CONF_VOLATILITY_THRESHOLD_HIGH = "volatility_threshold_high"
|
||||||
CONF_VOLATILITY_THRESHOLD_VERY_HIGH = "volatility_threshold_very_high"
|
CONF_VOLATILITY_THRESHOLD_VERY_HIGH = "volatility_threshold_very_high"
|
||||||
|
|
@ -97,16 +84,8 @@ DEFAULT_BEST_PRICE_MIN_PERIOD_LENGTH = 60 # 60 minutes minimum period length fo
|
||||||
DEFAULT_PEAK_PRICE_MIN_PERIOD_LENGTH = 30 # 30 minutes minimum period length for peak price (user-facing, minutes)
|
DEFAULT_PEAK_PRICE_MIN_PERIOD_LENGTH = 30 # 30 minutes minimum period length for peak price (user-facing, minutes)
|
||||||
DEFAULT_PRICE_RATING_THRESHOLD_LOW = -10 # Default rating threshold low percentage
|
DEFAULT_PRICE_RATING_THRESHOLD_LOW = -10 # Default rating threshold low percentage
|
||||||
DEFAULT_PRICE_RATING_THRESHOLD_HIGH = 10 # Default rating threshold high percentage
|
DEFAULT_PRICE_RATING_THRESHOLD_HIGH = 10 # Default rating threshold high percentage
|
||||||
DEFAULT_PRICE_RATING_HYSTERESIS = 2.0 # Hysteresis percentage to prevent flickering at threshold boundaries
|
|
||||||
DEFAULT_PRICE_RATING_GAP_TOLERANCE = 1 # Max consecutive intervals to smooth out (0 = disabled)
|
|
||||||
DEFAULT_PRICE_LEVEL_GAP_TOLERANCE = 1 # Max consecutive intervals to smooth out for price level (0 = disabled)
|
|
||||||
DEFAULT_AVERAGE_SENSOR_DISPLAY = "median" # Default: show median in state, mean in attributes
|
|
||||||
DEFAULT_PRICE_TREND_THRESHOLD_RISING = 3 # Default trend threshold for rising prices (%)
|
DEFAULT_PRICE_TREND_THRESHOLD_RISING = 3 # Default trend threshold for rising prices (%)
|
||||||
DEFAULT_PRICE_TREND_THRESHOLD_FALLING = -3 # Default trend threshold for falling prices (%, negative value)
|
DEFAULT_PRICE_TREND_THRESHOLD_FALLING = -3 # Default trend threshold for falling prices (%, negative value)
|
||||||
# Strong trend thresholds default to 2x the base threshold.
|
|
||||||
# These are independently configurable to allow fine-tuning of "strongly" detection.
|
|
||||||
DEFAULT_PRICE_TREND_THRESHOLD_STRONGLY_RISING = 6 # Default strong rising threshold (%)
|
|
||||||
DEFAULT_PRICE_TREND_THRESHOLD_STRONGLY_FALLING = -6 # Default strong falling threshold (%, negative value)
|
|
||||||
# Default volatility thresholds (relative values using coefficient of variation)
|
# Default volatility thresholds (relative values using coefficient of variation)
|
||||||
# Coefficient of variation = (standard_deviation / mean) * 100%
|
# Coefficient of variation = (standard_deviation / mean) * 100%
|
||||||
# These thresholds are unitless and work across different price levels
|
# These thresholds are unitless and work across different price levels
|
||||||
|
|
@ -143,12 +122,6 @@ MIN_PRICE_RATING_THRESHOLD_LOW = -50 # Minimum value for low rating threshold
|
||||||
MAX_PRICE_RATING_THRESHOLD_LOW = -5 # Maximum value for low rating threshold (must be < HIGH)
|
MAX_PRICE_RATING_THRESHOLD_LOW = -5 # Maximum value for low rating threshold (must be < HIGH)
|
||||||
MIN_PRICE_RATING_THRESHOLD_HIGH = 5 # Minimum value for high rating threshold (must be > LOW)
|
MIN_PRICE_RATING_THRESHOLD_HIGH = 5 # Minimum value for high rating threshold (must be > LOW)
|
||||||
MAX_PRICE_RATING_THRESHOLD_HIGH = 50 # Maximum value for high rating threshold
|
MAX_PRICE_RATING_THRESHOLD_HIGH = 50 # Maximum value for high rating threshold
|
||||||
MIN_PRICE_RATING_HYSTERESIS = 0.0 # Minimum hysteresis (0 = disabled)
|
|
||||||
MAX_PRICE_RATING_HYSTERESIS = 5.0 # Maximum hysteresis (5% band)
|
|
||||||
MIN_PRICE_RATING_GAP_TOLERANCE = 0 # Minimum gap tolerance (0 = disabled)
|
|
||||||
MAX_PRICE_RATING_GAP_TOLERANCE = 4 # Maximum gap tolerance (4 intervals = 1 hour)
|
|
||||||
MIN_PRICE_LEVEL_GAP_TOLERANCE = 0 # Minimum gap tolerance for price level (0 = disabled)
|
|
||||||
MAX_PRICE_LEVEL_GAP_TOLERANCE = 4 # Maximum gap tolerance for price level (4 intervals = 1 hour)
|
|
||||||
|
|
||||||
# Volatility threshold limits
|
# Volatility threshold limits
|
||||||
# MODERATE threshold: practical range 5% to 25% (entry point for noticeable fluctuation)
|
# MODERATE threshold: practical range 5% to 25% (entry point for noticeable fluctuation)
|
||||||
|
|
@ -167,11 +140,6 @@ MIN_PRICE_TREND_RISING = 1 # Minimum rising trend threshold
|
||||||
MAX_PRICE_TREND_RISING = 50 # Maximum rising trend threshold
|
MAX_PRICE_TREND_RISING = 50 # Maximum rising trend threshold
|
||||||
MIN_PRICE_TREND_FALLING = -50 # Minimum falling trend threshold (negative)
|
MIN_PRICE_TREND_FALLING = -50 # Minimum falling trend threshold (negative)
|
||||||
MAX_PRICE_TREND_FALLING = -1 # Maximum falling trend threshold (negative)
|
MAX_PRICE_TREND_FALLING = -1 # Maximum falling trend threshold (negative)
|
||||||
# Strong trend thresholds have higher ranges to allow detection of significant moves
|
|
||||||
MIN_PRICE_TREND_STRONGLY_RISING = 2 # Minimum strongly rising threshold (must be > rising)
|
|
||||||
MAX_PRICE_TREND_STRONGLY_RISING = 100 # Maximum strongly rising threshold
|
|
||||||
MIN_PRICE_TREND_STRONGLY_FALLING = -100 # Minimum strongly falling threshold (negative)
|
|
||||||
MAX_PRICE_TREND_STRONGLY_FALLING = -2 # Maximum strongly falling threshold (must be < falling)
|
|
||||||
|
|
||||||
# Gap count and relaxation limits
|
# Gap count and relaxation limits
|
||||||
MIN_GAP_COUNT = 0 # Minimum gap count
|
MIN_GAP_COUNT = 0 # Minimum gap count
|
||||||
|
|
@ -194,22 +162,12 @@ HOME_TYPES = {
|
||||||
# Currency mapping: ISO code -> (major_symbol, minor_symbol, minor_name)
|
# Currency mapping: ISO code -> (major_symbol, minor_symbol, minor_name)
|
||||||
# For currencies with Home Assistant constants, use those; otherwise define custom ones
|
# For currencies with Home Assistant constants, use those; otherwise define custom ones
|
||||||
CURRENCY_INFO = {
|
CURRENCY_INFO = {
|
||||||
"EUR": (CURRENCY_EURO, "ct", "Cents"),
|
"EUR": (CURRENCY_EURO, "ct", "cents"),
|
||||||
"NOK": ("kr", "øre", "Øre"),
|
"NOK": ("kr", "øre", "øre"),
|
||||||
"SEK": ("kr", "öre", "Öre"),
|
"SEK": ("kr", "öre", "öre"),
|
||||||
"DKK": ("kr", "øre", "Øre"),
|
"DKK": ("kr", "øre", "øre"),
|
||||||
"USD": (CURRENCY_DOLLAR, "¢", "Cents"),
|
"USD": (CURRENCY_DOLLAR, "¢", "cents"),
|
||||||
"GBP": ("£", "p", "Pence"),
|
"GBP": ("£", "p", "pence"),
|
||||||
}
|
|
||||||
|
|
||||||
# Base currency names: ISO code -> full currency name (in local language)
|
|
||||||
CURRENCY_NAMES = {
|
|
||||||
"EUR": "Euro",
|
|
||||||
"NOK": "Norske kroner",
|
|
||||||
"SEK": "Svenska kronor",
|
|
||||||
"DKK": "Danske kroner",
|
|
||||||
"USD": "US Dollar",
|
|
||||||
"GBP": "British Pound",
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -231,9 +189,9 @@ def get_currency_info(currency_code: str | None) -> tuple[str, str, str]:
|
||||||
return CURRENCY_INFO.get(currency_code.upper(), CURRENCY_INFO["EUR"])
|
return CURRENCY_INFO.get(currency_code.upper(), CURRENCY_INFO["EUR"])
|
||||||
|
|
||||||
|
|
||||||
def format_price_unit_base(currency_code: str | None) -> str:
|
def format_price_unit_major(currency_code: str | None) -> str:
|
||||||
"""
|
"""
|
||||||
Format the price unit string with base currency unit (e.g., '€/kWh').
|
Format the price unit string with major currency unit (e.g., '€/kWh').
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
currency_code: ISO 4217 currency code (e.g., 'EUR', 'NOK', 'SEK')
|
currency_code: ISO 4217 currency code (e.g., 'EUR', 'NOK', 'SEK')
|
||||||
|
|
@ -242,13 +200,13 @@ def format_price_unit_base(currency_code: str | None) -> str:
|
||||||
Formatted unit string like '€/kWh' or 'kr/kWh'
|
Formatted unit string like '€/kWh' or 'kr/kWh'
|
||||||
|
|
||||||
"""
|
"""
|
||||||
base_symbol, _, _ = get_currency_info(currency_code)
|
major_symbol, _, _ = get_currency_info(currency_code)
|
||||||
return f"{base_symbol}/{UnitOfPower.KILO_WATT}{UnitOfTime.HOURS}"
|
return f"{major_symbol}/{UnitOfPower.KILO_WATT}{UnitOfTime.HOURS}"
|
||||||
|
|
||||||
|
|
||||||
def format_price_unit_subunit(currency_code: str | None) -> str:
|
def format_price_unit_minor(currency_code: str | None) -> str:
|
||||||
"""
|
"""
|
||||||
Format the price unit string with subunit currency unit (e.g., 'ct/kWh').
|
Format the price unit string with minor currency unit (e.g., 'ct/kWh').
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
currency_code: ISO 4217 currency code (e.g., 'EUR', 'NOK', 'SEK')
|
currency_code: ISO 4217 currency code (e.g., 'EUR', 'NOK', 'SEK')
|
||||||
|
|
@ -257,180 +215,8 @@ def format_price_unit_subunit(currency_code: str | None) -> str:
|
||||||
Formatted unit string like 'ct/kWh' or 'øre/kWh'
|
Formatted unit string like 'ct/kWh' or 'øre/kWh'
|
||||||
|
|
||||||
"""
|
"""
|
||||||
_, subunit_symbol, _ = get_currency_info(currency_code)
|
_, minor_symbol, _ = get_currency_info(currency_code)
|
||||||
return f"{subunit_symbol}/{UnitOfPower.KILO_WATT}{UnitOfTime.HOURS}"
|
return f"{minor_symbol}/{UnitOfPower.KILO_WATT}{UnitOfTime.HOURS}"
|
||||||
|
|
||||||
|
|
||||||
def get_currency_name(currency_code: str | None) -> str:
|
|
||||||
"""
|
|
||||||
Get the full name of the base currency.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
currency_code: ISO 4217 currency code (e.g., 'EUR', 'NOK', 'SEK')
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Full currency name like 'Euro' or 'Norwegian Krone'
|
|
||||||
Defaults to 'Euro' if currency is not recognized
|
|
||||||
|
|
||||||
"""
|
|
||||||
if not currency_code:
|
|
||||||
currency_code = "EUR"
|
|
||||||
|
|
||||||
return CURRENCY_NAMES.get(currency_code.upper(), CURRENCY_NAMES["EUR"])
|
|
||||||
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# Currency Display Mode Configuration
|
|
||||||
# ============================================================================
|
|
||||||
|
|
||||||
# Configuration key for currency display mode
|
|
||||||
CONF_CURRENCY_DISPLAY_MODE = "currency_display_mode"
|
|
||||||
|
|
||||||
# Display mode values
|
|
||||||
DISPLAY_MODE_BASE = "base" # Display in base currency units (€, kr)
|
|
||||||
DISPLAY_MODE_SUBUNIT = "subunit" # Display in subunit currency units (ct, øre)
|
|
||||||
|
|
||||||
# Intelligent per-currency defaults based on market analysis
|
|
||||||
# EUR: Subunit (cents) - established convention in Germany/Netherlands
|
|
||||||
# NOK/SEK/DKK: Base (kroner) - Scandinavian preference for whole units
|
|
||||||
# USD/GBP: Base - international standard
|
|
||||||
DEFAULT_CURRENCY_DISPLAY = {
|
|
||||||
"EUR": DISPLAY_MODE_SUBUNIT,
|
|
||||||
"NOK": DISPLAY_MODE_BASE,
|
|
||||||
"SEK": DISPLAY_MODE_BASE,
|
|
||||||
"DKK": DISPLAY_MODE_BASE,
|
|
||||||
"USD": DISPLAY_MODE_BASE,
|
|
||||||
"GBP": DISPLAY_MODE_BASE,
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def get_default_currency_display(currency_code: str | None) -> str:
|
|
||||||
"""
|
|
||||||
Get intelligent default display mode for a currency.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
currency_code: ISO 4217 currency code (e.g., 'EUR', 'NOK')
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Default display mode ('base' or 'subunit')
|
|
||||||
|
|
||||||
"""
|
|
||||||
if not currency_code:
|
|
||||||
return DISPLAY_MODE_SUBUNIT # Fallback default
|
|
||||||
|
|
||||||
return DEFAULT_CURRENCY_DISPLAY.get(currency_code.upper(), DISPLAY_MODE_SUBUNIT)
|
|
||||||
|
|
||||||
|
|
||||||
def get_default_options(currency_code: str | None) -> dict[str, Any]:
|
|
||||||
"""
|
|
||||||
Get complete default options for a new config entry.
|
|
||||||
|
|
||||||
This ensures new config entries have explicitly set defaults based on their currency,
|
|
||||||
distinguishing them from legacy config entries that need migration.
|
|
||||||
|
|
||||||
Options structure has been flattened for single-section steps:
|
|
||||||
- Flat values: extended_descriptions, average_sensor_display, currency_display_mode,
|
|
||||||
price_rating_thresholds, volatility_thresholds, price_trend_thresholds, time offsets
|
|
||||||
- Nested sections (multi-section steps only): period_settings, flexibility_settings,
|
|
||||||
relaxation_and_target_periods
|
|
||||||
|
|
||||||
Args:
|
|
||||||
currency_code: ISO 4217 currency code (e.g., 'EUR', 'NOK')
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Dictionary with all default option values in nested section structure
|
|
||||||
|
|
||||||
"""
|
|
||||||
return {
|
|
||||||
# Flat configuration values
|
|
||||||
CONF_EXTENDED_DESCRIPTIONS: DEFAULT_EXTENDED_DESCRIPTIONS,
|
|
||||||
CONF_AVERAGE_SENSOR_DISPLAY: DEFAULT_AVERAGE_SENSOR_DISPLAY,
|
|
||||||
CONF_CURRENCY_DISPLAY_MODE: get_default_currency_display(currency_code),
|
|
||||||
CONF_VIRTUAL_TIME_OFFSET_DAYS: DEFAULT_VIRTUAL_TIME_OFFSET_DAYS,
|
|
||||||
CONF_VIRTUAL_TIME_OFFSET_HOURS: DEFAULT_VIRTUAL_TIME_OFFSET_HOURS,
|
|
||||||
CONF_VIRTUAL_TIME_OFFSET_MINUTES: DEFAULT_VIRTUAL_TIME_OFFSET_MINUTES,
|
|
||||||
# Price rating settings (flat - single-section step)
|
|
||||||
CONF_PRICE_RATING_THRESHOLD_LOW: DEFAULT_PRICE_RATING_THRESHOLD_LOW,
|
|
||||||
CONF_PRICE_RATING_THRESHOLD_HIGH: DEFAULT_PRICE_RATING_THRESHOLD_HIGH,
|
|
||||||
CONF_PRICE_RATING_HYSTERESIS: DEFAULT_PRICE_RATING_HYSTERESIS,
|
|
||||||
CONF_PRICE_RATING_GAP_TOLERANCE: DEFAULT_PRICE_RATING_GAP_TOLERANCE,
|
|
||||||
CONF_PRICE_LEVEL_GAP_TOLERANCE: DEFAULT_PRICE_LEVEL_GAP_TOLERANCE,
|
|
||||||
# Volatility thresholds (flat - single-section step)
|
|
||||||
CONF_VOLATILITY_THRESHOLD_MODERATE: DEFAULT_VOLATILITY_THRESHOLD_MODERATE,
|
|
||||||
CONF_VOLATILITY_THRESHOLD_HIGH: DEFAULT_VOLATILITY_THRESHOLD_HIGH,
|
|
||||||
CONF_VOLATILITY_THRESHOLD_VERY_HIGH: DEFAULT_VOLATILITY_THRESHOLD_VERY_HIGH,
|
|
||||||
# Price trend thresholds (flat - single-section step)
|
|
||||||
CONF_PRICE_TREND_THRESHOLD_RISING: DEFAULT_PRICE_TREND_THRESHOLD_RISING,
|
|
||||||
CONF_PRICE_TREND_THRESHOLD_FALLING: DEFAULT_PRICE_TREND_THRESHOLD_FALLING,
|
|
||||||
# Nested section: Period settings (shared by best/peak price)
|
|
||||||
"period_settings": {
|
|
||||||
CONF_BEST_PRICE_MIN_PERIOD_LENGTH: DEFAULT_BEST_PRICE_MIN_PERIOD_LENGTH,
|
|
||||||
CONF_PEAK_PRICE_MIN_PERIOD_LENGTH: DEFAULT_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
|
||||||
CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT: DEFAULT_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
|
||||||
CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT: DEFAULT_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
|
||||||
CONF_BEST_PRICE_MAX_LEVEL: DEFAULT_BEST_PRICE_MAX_LEVEL,
|
|
||||||
CONF_PEAK_PRICE_MIN_LEVEL: DEFAULT_PEAK_PRICE_MIN_LEVEL,
|
|
||||||
},
|
|
||||||
# Nested section: Flexibility settings (shared by best/peak price)
|
|
||||||
"flexibility_settings": {
|
|
||||||
CONF_BEST_PRICE_FLEX: DEFAULT_BEST_PRICE_FLEX,
|
|
||||||
CONF_PEAK_PRICE_FLEX: DEFAULT_PEAK_PRICE_FLEX,
|
|
||||||
CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG: DEFAULT_BEST_PRICE_MIN_DISTANCE_FROM_AVG,
|
|
||||||
CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG: DEFAULT_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
|
||||||
},
|
|
||||||
# Nested section: Relaxation and target periods (shared by best/peak price)
|
|
||||||
"relaxation_and_target_periods": {
|
|
||||||
CONF_ENABLE_MIN_PERIODS_BEST: DEFAULT_ENABLE_MIN_PERIODS_BEST,
|
|
||||||
CONF_MIN_PERIODS_BEST: DEFAULT_MIN_PERIODS_BEST,
|
|
||||||
CONF_RELAXATION_ATTEMPTS_BEST: DEFAULT_RELAXATION_ATTEMPTS_BEST,
|
|
||||||
CONF_ENABLE_MIN_PERIODS_PEAK: DEFAULT_ENABLE_MIN_PERIODS_PEAK,
|
|
||||||
CONF_MIN_PERIODS_PEAK: DEFAULT_MIN_PERIODS_PEAK,
|
|
||||||
CONF_RELAXATION_ATTEMPTS_PEAK: DEFAULT_RELAXATION_ATTEMPTS_PEAK,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def get_display_unit_factor(config_entry: ConfigEntry) -> int:
|
|
||||||
"""
|
|
||||||
Get multiplication factor for converting base to display currency.
|
|
||||||
|
|
||||||
Internal storage is ALWAYS in base currency (4 decimals precision).
|
|
||||||
This function returns the conversion factor based on user configuration.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
config_entry: ConfigEntry with currency_display_mode option
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
100 for subunit currency display, 1 for base currency display
|
|
||||||
|
|
||||||
Example:
|
|
||||||
price_base = 0.2534 # Internal: 0.2534 €/kWh
|
|
||||||
factor = get_display_unit_factor(config_entry)
|
|
||||||
display_value = round(price_base * factor, 2)
|
|
||||||
# → 25.34 ct/kWh (subunit) or 0.25 €/kWh (base)
|
|
||||||
|
|
||||||
"""
|
|
||||||
display_mode = config_entry.options.get(CONF_CURRENCY_DISPLAY_MODE, DISPLAY_MODE_SUBUNIT)
|
|
||||||
return 100 if display_mode == DISPLAY_MODE_SUBUNIT else 1
|
|
||||||
|
|
||||||
|
|
||||||
def get_display_unit_string(config_entry: ConfigEntry, currency_code: str | None) -> str:
|
|
||||||
"""
|
|
||||||
Get unit string for display based on configuration.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
config_entry: ConfigEntry with currency_display_mode option
|
|
||||||
currency_code: ISO 4217 currency code
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Formatted unit string (e.g., 'ct/kWh' or '€/kWh')
|
|
||||||
|
|
||||||
"""
|
|
||||||
display_mode = config_entry.options.get(CONF_CURRENCY_DISPLAY_MODE, DISPLAY_MODE_SUBUNIT)
|
|
||||||
|
|
||||||
if display_mode == DISPLAY_MODE_SUBUNIT:
|
|
||||||
return format_price_unit_subunit(currency_code)
|
|
||||||
return format_price_unit_base(currency_code)
|
|
||||||
|
|
||||||
|
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
|
|
@ -458,14 +244,6 @@ VOLATILITY_MODERATE = "MODERATE"
|
||||||
VOLATILITY_HIGH = "HIGH"
|
VOLATILITY_HIGH = "HIGH"
|
||||||
VOLATILITY_VERY_HIGH = "VERY_HIGH"
|
VOLATILITY_VERY_HIGH = "VERY_HIGH"
|
||||||
|
|
||||||
# Price trend constants (calculated values with 5-level scale)
|
|
||||||
# Used by trend sensors: momentary, short-term, mid-term, long-term
|
|
||||||
PRICE_TREND_STRONGLY_FALLING = "strongly_falling"
|
|
||||||
PRICE_TREND_FALLING = "falling"
|
|
||||||
PRICE_TREND_STABLE = "stable"
|
|
||||||
PRICE_TREND_RISING = "rising"
|
|
||||||
PRICE_TREND_STRONGLY_RISING = "strongly_rising"
|
|
||||||
|
|
||||||
# Sensor options (lowercase versions for ENUM device class)
|
# Sensor options (lowercase versions for ENUM device class)
|
||||||
# NOTE: These constants define the valid enum options, but they are not used directly
|
# NOTE: These constants define the valid enum options, but they are not used directly
|
||||||
# in sensor/definitions.py due to import timing issues. Instead, the options are defined inline
|
# in sensor/definitions.py due to import timing issues. Instead, the options are defined inline
|
||||||
|
|
@ -491,15 +269,6 @@ VOLATILITY_OPTIONS = [
|
||||||
VOLATILITY_VERY_HIGH.lower(),
|
VOLATILITY_VERY_HIGH.lower(),
|
||||||
]
|
]
|
||||||
|
|
||||||
# Trend options for enum sensors (lowercase versions for ENUM device class)
|
|
||||||
PRICE_TREND_OPTIONS = [
|
|
||||||
PRICE_TREND_STRONGLY_FALLING,
|
|
||||||
PRICE_TREND_FALLING,
|
|
||||||
PRICE_TREND_STABLE,
|
|
||||||
PRICE_TREND_RISING,
|
|
||||||
PRICE_TREND_STRONGLY_RISING,
|
|
||||||
]
|
|
||||||
|
|
||||||
# Valid options for best price maximum level filter
|
# Valid options for best price maximum level filter
|
||||||
# Sorted from cheap to expensive: user selects "up to how expensive"
|
# Sorted from cheap to expensive: user selects "up to how expensive"
|
||||||
BEST_PRICE_MAX_LEVEL_OPTIONS = [
|
BEST_PRICE_MAX_LEVEL_OPTIONS = [
|
||||||
|
|
@ -542,16 +311,6 @@ PRICE_RATING_MAPPING = {
|
||||||
PRICE_RATING_HIGH: 1,
|
PRICE_RATING_HIGH: 1,
|
||||||
}
|
}
|
||||||
|
|
||||||
# Mapping for comparing price trends (used for sorting and automation comparisons)
|
|
||||||
# Values range from -2 (strongly falling) to +2 (strongly rising), with 0 = stable
|
|
||||||
PRICE_TREND_MAPPING = {
|
|
||||||
PRICE_TREND_STRONGLY_FALLING: -2,
|
|
||||||
PRICE_TREND_FALLING: -1,
|
|
||||||
PRICE_TREND_STABLE: 0,
|
|
||||||
PRICE_TREND_RISING: 1,
|
|
||||||
PRICE_TREND_STRONGLY_RISING: 2,
|
|
||||||
}
|
|
||||||
|
|
||||||
# Icon mapping for price levels (dynamic icons based on level)
|
# Icon mapping for price levels (dynamic icons based on level)
|
||||||
PRICE_LEVEL_ICON_MAPPING = {
|
PRICE_LEVEL_ICON_MAPPING = {
|
||||||
PRICE_LEVEL_VERY_CHEAP: "mdi:gauge-empty",
|
PRICE_LEVEL_VERY_CHEAP: "mdi:gauge-empty",
|
||||||
|
|
|
||||||
|
|
@ -1,28 +1,4 @@
|
||||||
"""
|
"""Cache management for coordinator module."""
|
||||||
Cache management for coordinator persistent storage.
|
|
||||||
|
|
||||||
This module handles persistent storage for the coordinator, storing:
|
|
||||||
- user_data: Account/home metadata (required, refreshed daily)
|
|
||||||
- Timestamps for cache validation and lifecycle tracking
|
|
||||||
|
|
||||||
**Storage Architecture (as of v0.25.0):**
|
|
||||||
|
|
||||||
There are TWO persistent storage files per config entry:
|
|
||||||
|
|
||||||
1. `tibber_prices.{entry_id}` (this module)
|
|
||||||
- user_data: Account info, home metadata, timezone, currency
|
|
||||||
- Timestamps: last_user_update, last_midnight_check
|
|
||||||
|
|
||||||
2. `tibber_prices.interval_pool.{entry_id}` (interval_pool/storage.py)
|
|
||||||
- Intervals: Deduplicated quarter-hourly price data (source of truth)
|
|
||||||
- Fetch metadata: When each interval was fetched
|
|
||||||
- Protected range: Which intervals to keep during cleanup
|
|
||||||
|
|
||||||
**Single Source of Truth:**
|
|
||||||
Price intervals are ONLY stored in IntervalPool. This cache stores only
|
|
||||||
user metadata and timestamps. The IntervalPool handles all price data
|
|
||||||
fetching, caching, and persistence independently.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
|
|
@ -40,9 +16,11 @@ _LOGGER = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class TibberPricesCacheData(NamedTuple):
|
class TibberPricesCacheData(NamedTuple):
|
||||||
"""Cache data structure for user metadata (price data is in IntervalPool)."""
|
"""Cache data structure."""
|
||||||
|
|
||||||
|
price_data: dict[str, Any] | None
|
||||||
user_data: dict[str, Any] | None
|
user_data: dict[str, Any] | None
|
||||||
|
last_price_update: datetime | None
|
||||||
last_user_update: datetime | None
|
last_user_update: datetime | None
|
||||||
last_midnight_check: datetime | None
|
last_midnight_check: datetime | None
|
||||||
|
|
||||||
|
|
@ -53,16 +31,20 @@ async def load_cache(
|
||||||
*,
|
*,
|
||||||
time: TibberPricesTimeService,
|
time: TibberPricesTimeService,
|
||||||
) -> TibberPricesCacheData:
|
) -> TibberPricesCacheData:
|
||||||
"""Load cached user data from storage (price data is in IntervalPool)."""
|
"""Load cached data from storage."""
|
||||||
try:
|
try:
|
||||||
stored = await store.async_load()
|
stored = await store.async_load()
|
||||||
if stored:
|
if stored:
|
||||||
|
cached_price_data = stored.get("price_data")
|
||||||
cached_user_data = stored.get("user_data")
|
cached_user_data = stored.get("user_data")
|
||||||
|
|
||||||
# Restore timestamps
|
# Restore timestamps
|
||||||
|
last_price_update = None
|
||||||
last_user_update = None
|
last_user_update = None
|
||||||
last_midnight_check = None
|
last_midnight_check = None
|
||||||
|
|
||||||
|
if last_price_update_str := stored.get("last_price_update"):
|
||||||
|
last_price_update = time.parse_datetime(last_price_update_str)
|
||||||
if last_user_update_str := stored.get("last_user_update"):
|
if last_user_update_str := stored.get("last_user_update"):
|
||||||
last_user_update = time.parse_datetime(last_user_update_str)
|
last_user_update = time.parse_datetime(last_user_update_str)
|
||||||
if last_midnight_check_str := stored.get("last_midnight_check"):
|
if last_midnight_check_str := stored.get("last_midnight_check"):
|
||||||
|
|
@ -70,7 +52,9 @@ async def load_cache(
|
||||||
|
|
||||||
_LOGGER.debug("%s Cache loaded successfully", log_prefix)
|
_LOGGER.debug("%s Cache loaded successfully", log_prefix)
|
||||||
return TibberPricesCacheData(
|
return TibberPricesCacheData(
|
||||||
|
price_data=cached_price_data,
|
||||||
user_data=cached_user_data,
|
user_data=cached_user_data,
|
||||||
|
last_price_update=last_price_update,
|
||||||
last_user_update=last_user_update,
|
last_user_update=last_user_update,
|
||||||
last_midnight_check=last_midnight_check,
|
last_midnight_check=last_midnight_check,
|
||||||
)
|
)
|
||||||
|
|
@ -80,7 +64,9 @@ async def load_cache(
|
||||||
_LOGGER.warning("%s Failed to load cache: %s", log_prefix, ex)
|
_LOGGER.warning("%s Failed to load cache: %s", log_prefix, ex)
|
||||||
|
|
||||||
return TibberPricesCacheData(
|
return TibberPricesCacheData(
|
||||||
|
price_data=None,
|
||||||
user_data=None,
|
user_data=None,
|
||||||
|
last_price_update=None,
|
||||||
last_user_update=None,
|
last_user_update=None,
|
||||||
last_midnight_check=None,
|
last_midnight_check=None,
|
||||||
)
|
)
|
||||||
|
|
@ -91,9 +77,11 @@ async def save_cache(
|
||||||
cache_data: TibberPricesCacheData,
|
cache_data: TibberPricesCacheData,
|
||||||
log_prefix: str,
|
log_prefix: str,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Store cache data (user metadata only, price data is in IntervalPool)."""
|
"""Store cache data."""
|
||||||
data = {
|
data = {
|
||||||
|
"price_data": cache_data.price_data,
|
||||||
"user_data": cache_data.user_data,
|
"user_data": cache_data.user_data,
|
||||||
|
"last_price_update": (cache_data.last_price_update.isoformat() if cache_data.last_price_update else None),
|
||||||
"last_user_update": (cache_data.last_user_update.isoformat() if cache_data.last_user_update else None),
|
"last_user_update": (cache_data.last_user_update.isoformat() if cache_data.last_user_update else None),
|
||||||
"last_midnight_check": (cache_data.last_midnight_check.isoformat() if cache_data.last_midnight_check else None),
|
"last_midnight_check": (cache_data.last_midnight_check.isoformat() if cache_data.last_midnight_check else None),
|
||||||
}
|
}
|
||||||
|
|
@ -103,3 +91,55 @@ async def save_cache(
|
||||||
_LOGGER.debug("%s Cache stored successfully", log_prefix)
|
_LOGGER.debug("%s Cache stored successfully", log_prefix)
|
||||||
except OSError:
|
except OSError:
|
||||||
_LOGGER.exception("%s Failed to store cache", log_prefix)
|
_LOGGER.exception("%s Failed to store cache", log_prefix)
|
||||||
|
|
||||||
|
|
||||||
|
def is_cache_valid(
|
||||||
|
cache_data: TibberPricesCacheData,
|
||||||
|
log_prefix: str,
|
||||||
|
*,
|
||||||
|
time: TibberPricesTimeService,
|
||||||
|
) -> bool:
|
||||||
|
"""
|
||||||
|
Validate if cached price data is still current.
|
||||||
|
|
||||||
|
Returns False if:
|
||||||
|
- No cached data exists
|
||||||
|
- Cached data is from a different calendar day (in local timezone)
|
||||||
|
- Midnight turnover has occurred since cache was saved
|
||||||
|
- Cache structure is outdated (pre-v0.15.0 multi-home format)
|
||||||
|
|
||||||
|
"""
|
||||||
|
if cache_data.price_data is None or cache_data.last_price_update is None:
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Check for old cache structure (multi-home format from v0.14.0)
|
||||||
|
# Old format: {"homes": {home_id: {...}}}
|
||||||
|
# New format: {"home_id": str, "price_info": [...]}
|
||||||
|
if "homes" in cache_data.price_data:
|
||||||
|
_LOGGER.info(
|
||||||
|
"%s Cache has old multi-home structure (v0.14.0), invalidating to fetch fresh data",
|
||||||
|
log_prefix,
|
||||||
|
)
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Check for missing required keys in new structure
|
||||||
|
if "price_info" not in cache_data.price_data:
|
||||||
|
_LOGGER.info(
|
||||||
|
"%s Cache missing 'price_info' key, invalidating to fetch fresh data",
|
||||||
|
log_prefix,
|
||||||
|
)
|
||||||
|
return False
|
||||||
|
|
||||||
|
current_local_date = time.as_local(time.now()).date()
|
||||||
|
last_update_local_date = time.as_local(cache_data.last_price_update).date()
|
||||||
|
|
||||||
|
if current_local_date != last_update_local_date:
|
||||||
|
_LOGGER.debug(
|
||||||
|
"%s Cache date mismatch: cached=%s, current=%s",
|
||||||
|
log_prefix,
|
||||||
|
last_update_local_date,
|
||||||
|
current_local_date,
|
||||||
|
)
|
||||||
|
return False
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
|
||||||
|
|
@ -31,7 +31,6 @@ TIME_SENSITIVE_ENTITY_KEYS = frozenset(
|
||||||
{
|
{
|
||||||
# Current/next/previous price sensors
|
# Current/next/previous price sensors
|
||||||
"current_interval_price",
|
"current_interval_price",
|
||||||
"current_interval_price_base",
|
|
||||||
"next_interval_price",
|
"next_interval_price",
|
||||||
"previous_interval_price",
|
"previous_interval_price",
|
||||||
# Current/next/previous price levels
|
# Current/next/previous price levels
|
||||||
|
|
@ -85,11 +84,7 @@ TIME_SENSITIVE_ENTITY_KEYS = frozenset(
|
||||||
"best_price_next_start_time",
|
"best_price_next_start_time",
|
||||||
"peak_price_end_time",
|
"peak_price_end_time",
|
||||||
"peak_price_next_start_time",
|
"peak_price_next_start_time",
|
||||||
# Lifecycle sensor needs quarter-hour precision for state transitions:
|
# Lifecycle sensor (needs quarter-hour updates for turnover_pending detection at 23:45)
|
||||||
# - 23:45: turnover_pending (last interval before midnight)
|
|
||||||
# - 00:00: turnover complete (after midnight API update)
|
|
||||||
# - 13:00: searching_tomorrow (when tomorrow data search begins)
|
|
||||||
# Uses state-change filter in _handle_time_sensitive_update() to prevent recorder spam
|
|
||||||
"data_lifecycle_status",
|
"data_lifecycle_status",
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
|
||||||
|
|
@ -11,6 +11,7 @@ from homeassistant.helpers.storage import Store
|
||||||
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
|
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
|
from collections.abc import Callable
|
||||||
from datetime import date, datetime
|
from datetime import date, datetime
|
||||||
|
|
||||||
from homeassistant.config_entries import ConfigEntry
|
from homeassistant.config_entries import ConfigEntry
|
||||||
|
|
@ -34,12 +35,11 @@ from .constants import (
|
||||||
STORAGE_VERSION,
|
STORAGE_VERSION,
|
||||||
UPDATE_INTERVAL,
|
UPDATE_INTERVAL,
|
||||||
)
|
)
|
||||||
|
from .data_fetching import TibberPricesDataFetcher
|
||||||
from .data_transformation import TibberPricesDataTransformer
|
from .data_transformation import TibberPricesDataTransformer
|
||||||
from .listeners import TibberPricesListenerManager
|
from .listeners import TibberPricesListenerManager
|
||||||
from .midnight_handler import TibberPricesMidnightHandler
|
from .midnight_handler import TibberPricesMidnightHandler
|
||||||
from .periods import TibberPricesPeriodCalculator
|
from .periods import TibberPricesPeriodCalculator
|
||||||
from .price_data_manager import TibberPricesPriceDataManager
|
|
||||||
from .repairs import TibberPricesRepairManager
|
|
||||||
from .time_service import TibberPricesTimeService
|
from .time_service import TibberPricesTimeService
|
||||||
|
|
||||||
_LOGGER = logging.getLogger(__name__)
|
_LOGGER = logging.getLogger(__name__)
|
||||||
|
|
@ -205,20 +205,12 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
||||||
# Initialize helper modules
|
# Initialize helper modules
|
||||||
self._listener_manager = TibberPricesListenerManager(hass, self._log_prefix)
|
self._listener_manager = TibberPricesListenerManager(hass, self._log_prefix)
|
||||||
self._midnight_handler = TibberPricesMidnightHandler()
|
self._midnight_handler = TibberPricesMidnightHandler()
|
||||||
self._price_data_manager = TibberPricesPriceDataManager(
|
self._data_fetcher = TibberPricesDataFetcher(
|
||||||
api=self.api,
|
api=self.api,
|
||||||
store=self._store,
|
store=self._store,
|
||||||
log_prefix=self._log_prefix,
|
log_prefix=self._log_prefix,
|
||||||
user_update_interval=timedelta(days=1),
|
user_update_interval=timedelta(days=1),
|
||||||
time=self.time,
|
time=self.time,
|
||||||
home_id=self._home_id,
|
|
||||||
interval_pool=self.interval_pool,
|
|
||||||
)
|
|
||||||
# Create period calculator BEFORE data transformer (transformer needs it in lambda)
|
|
||||||
self._period_calculator = TibberPricesPeriodCalculator(
|
|
||||||
config_entry=config_entry,
|
|
||||||
log_prefix=self._log_prefix,
|
|
||||||
get_config_override_fn=self.get_config_override,
|
|
||||||
)
|
)
|
||||||
self._data_transformer = TibberPricesDataTransformer(
|
self._data_transformer = TibberPricesDataTransformer(
|
||||||
config_entry=config_entry,
|
config_entry=config_entry,
|
||||||
|
|
@ -228,38 +220,30 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
||||||
),
|
),
|
||||||
time=self.time,
|
time=self.time,
|
||||||
)
|
)
|
||||||
self._repair_manager = TibberPricesRepairManager(
|
self._period_calculator = TibberPricesPeriodCalculator(
|
||||||
hass=hass,
|
config_entry=config_entry,
|
||||||
entry_id=config_entry.entry_id,
|
log_prefix=self._log_prefix,
|
||||||
home_name=config_entry.title,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
# Register options update listener to invalidate config caches
|
# Register options update listener to invalidate config caches
|
||||||
config_entry.async_on_unload(config_entry.add_update_listener(self._handle_options_update))
|
config_entry.async_on_unload(config_entry.add_update_listener(self._handle_options_update))
|
||||||
|
|
||||||
# User data cache (price data is in IntervalPool)
|
# Legacy compatibility - keep references for methods that access directly
|
||||||
self._cached_user_data: dict[str, Any] | None = None
|
self._cached_user_data: dict[str, Any] | None = None
|
||||||
self._last_user_update: datetime | None = None
|
self._last_user_update: datetime | None = None
|
||||||
self._user_update_interval = timedelta(days=1)
|
self._user_update_interval = timedelta(days=1)
|
||||||
|
self._cached_price_data: dict[str, Any] | None = None
|
||||||
|
self._last_price_update: datetime | None = None
|
||||||
|
|
||||||
# Data lifecycle tracking
|
# Data lifecycle tracking for diagnostic sensor
|
||||||
# Note: _lifecycle_state is used for DIAGNOSTICS only (diagnostics.py export).
|
|
||||||
# The lifecycle SENSOR calculates its state dynamically in get_lifecycle_state(),
|
|
||||||
# using: _is_fetching, last_exception, time calculations, _needs_tomorrow_data(),
|
|
||||||
# and _last_price_update. It does NOT read _lifecycle_state!
|
|
||||||
self._lifecycle_state: str = (
|
self._lifecycle_state: str = (
|
||||||
"cached" # For diagnostics: cached, fresh, refreshing, searching_tomorrow, turnover_pending, error
|
"cached" # Current state: cached, fresh, refreshing, searching_tomorrow, turnover_pending, error
|
||||||
)
|
)
|
||||||
self._last_price_update: datetime | None = None # When price data was last fetched from API
|
|
||||||
self._api_calls_today: int = 0 # Counter for API calls today
|
self._api_calls_today: int = 0 # Counter for API calls today
|
||||||
self._last_api_call_date: date | None = None # Date of last API call (for daily reset)
|
self._last_api_call_date: date | None = None # Date of last API call (for daily reset)
|
||||||
self._is_fetching: bool = False # Flag to track active API fetch (read by lifecycle sensor)
|
self._is_fetching: bool = False # Flag to track active API fetch
|
||||||
self._last_coordinator_update: datetime | None = None # When Timer #1 last ran (_async_update_data)
|
self._last_coordinator_update: datetime | None = None # When Timer #1 last ran (_async_update_data)
|
||||||
|
self._lifecycle_callbacks: list[Callable[[], None]] = [] # Push-update callbacks for lifecycle sensor
|
||||||
# Runtime config overrides from config entities (number/switch)
|
|
||||||
# Structure: {"section_name": {"config_key": value, ...}, ...}
|
|
||||||
# When set, these override the corresponding options from config_entry.options
|
|
||||||
self._config_overrides: dict[str, dict[str, Any]] = {}
|
|
||||||
|
|
||||||
# Start timers
|
# Start timers
|
||||||
self._listener_manager.schedule_quarter_hour_refresh(self._handle_quarter_hour_refresh)
|
self._listener_manager.schedule_quarter_hour_refresh(self._handle_quarter_hour_refresh)
|
||||||
|
|
@ -271,129 +255,12 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
||||||
getattr(_LOGGER, level)(prefixed_message, *args, **kwargs)
|
getattr(_LOGGER, level)(prefixed_message, *args, **kwargs)
|
||||||
|
|
||||||
async def _handle_options_update(self, _hass: HomeAssistant, _config_entry: ConfigEntry) -> None:
|
async def _handle_options_update(self, _hass: HomeAssistant, _config_entry: ConfigEntry) -> None:
|
||||||
"""Handle options update by invalidating config caches and re-transforming data."""
|
"""Handle options update by invalidating config caches."""
|
||||||
self._log("debug", "Options update triggered, re-transforming data")
|
self._log("debug", "Options updated, invalidating config caches")
|
||||||
self._data_transformer.invalidate_config_cache()
|
self._data_transformer.invalidate_config_cache()
|
||||||
self._period_calculator.invalidate_config_cache()
|
self._period_calculator.invalidate_config_cache()
|
||||||
|
# Trigger a refresh to apply new configuration
|
||||||
# Re-transform existing data with new configuration
|
await self.async_request_refresh()
|
||||||
# This updates rating_levels, volatility, and period calculations
|
|
||||||
# without needing to fetch new data from the API
|
|
||||||
if self.data and "priceInfo" in self.data:
|
|
||||||
# Extract raw price_info and re-transform
|
|
||||||
raw_data = {"price_info": self.data["priceInfo"]}
|
|
||||||
self.data = self._transform_data(raw_data)
|
|
||||||
self.async_update_listeners()
|
|
||||||
else:
|
|
||||||
self._log("debug", "No data to re-transform")
|
|
||||||
|
|
||||||
# =========================================================================
|
|
||||||
# Runtime Config Override Methods (for number/switch entities)
|
|
||||||
# =========================================================================
|
|
||||||
|
|
||||||
def set_config_override(self, config_key: str, config_section: str, value: Any) -> None:
|
|
||||||
"""
|
|
||||||
Set a runtime config override value.
|
|
||||||
|
|
||||||
These overrides take precedence over options from config_entry.options
|
|
||||||
and are used by number/switch entities for runtime configuration.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
config_key: The configuration key (e.g., CONF_BEST_PRICE_FLEX)
|
|
||||||
config_section: The section in options (e.g., "flexibility_settings")
|
|
||||||
value: The override value
|
|
||||||
|
|
||||||
"""
|
|
||||||
if config_section not in self._config_overrides:
|
|
||||||
self._config_overrides[config_section] = {}
|
|
||||||
self._config_overrides[config_section][config_key] = value
|
|
||||||
self._log(
|
|
||||||
"debug",
|
|
||||||
"Config override set: %s.%s = %s",
|
|
||||||
config_section,
|
|
||||||
config_key,
|
|
||||||
value,
|
|
||||||
)
|
|
||||||
|
|
||||||
def remove_config_override(self, config_key: str, config_section: str) -> None:
|
|
||||||
"""
|
|
||||||
Remove a runtime config override value.
|
|
||||||
|
|
||||||
After removal, the value from config_entry.options will be used again.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
config_key: The configuration key to remove
|
|
||||||
config_section: The section the key belongs to
|
|
||||||
|
|
||||||
"""
|
|
||||||
if config_section in self._config_overrides:
|
|
||||||
self._config_overrides[config_section].pop(config_key, None)
|
|
||||||
# Clean up empty sections
|
|
||||||
if not self._config_overrides[config_section]:
|
|
||||||
del self._config_overrides[config_section]
|
|
||||||
self._log(
|
|
||||||
"debug",
|
|
||||||
"Config override removed: %s.%s",
|
|
||||||
config_section,
|
|
||||||
config_key,
|
|
||||||
)
|
|
||||||
|
|
||||||
def get_config_override(self, config_key: str, config_section: str) -> Any | None:
|
|
||||||
"""
|
|
||||||
Get a runtime config override value if set.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
config_key: The configuration key to check
|
|
||||||
config_section: The section the key belongs to
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
The override value if set, None otherwise
|
|
||||||
|
|
||||||
"""
|
|
||||||
return self._config_overrides.get(config_section, {}).get(config_key)
|
|
||||||
|
|
||||||
def has_config_override(self, config_key: str, config_section: str) -> bool:
|
|
||||||
"""
|
|
||||||
Check if a runtime config override is set.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
config_key: The configuration key to check
|
|
||||||
config_section: The section the key belongs to
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
True if an override is set, False otherwise
|
|
||||||
|
|
||||||
"""
|
|
||||||
return config_key in self._config_overrides.get(config_section, {})
|
|
||||||
|
|
||||||
def get_active_overrides(self) -> dict[str, dict[str, Any]]:
|
|
||||||
"""
|
|
||||||
Get all active config overrides.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Dictionary of all active overrides by section
|
|
||||||
|
|
||||||
"""
|
|
||||||
return self._config_overrides.copy()
|
|
||||||
|
|
||||||
async def async_handle_config_override_update(self) -> None:
|
|
||||||
"""
|
|
||||||
Handle config override change by invalidating caches and re-transforming data.
|
|
||||||
|
|
||||||
This is called by number/switch entities when their values change.
|
|
||||||
Uses the same logic as options update to ensure consistent behavior.
|
|
||||||
"""
|
|
||||||
self._log("debug", "Config override update triggered, re-transforming data")
|
|
||||||
self._data_transformer.invalidate_config_cache()
|
|
||||||
self._period_calculator.invalidate_config_cache()
|
|
||||||
|
|
||||||
# Re-transform existing data with new configuration
|
|
||||||
if self.data and "priceInfo" in self.data:
|
|
||||||
raw_data = {"price_info": self.data["priceInfo"]}
|
|
||||||
self.data = self._transform_data(raw_data)
|
|
||||||
self.async_update_listeners()
|
|
||||||
else:
|
|
||||||
self._log("debug", "No data to re-transform")
|
|
||||||
|
|
||||||
@callback
|
@callback
|
||||||
def async_add_time_sensitive_listener(self, update_callback: TimeServiceCallback) -> CALLBACK_TYPE:
|
def async_add_time_sensitive_listener(self, update_callback: TimeServiceCallback) -> CALLBACK_TYPE:
|
||||||
|
|
@ -473,7 +340,7 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
||||||
|
|
||||||
# Update helper modules with fresh TimeService instance
|
# Update helper modules with fresh TimeService instance
|
||||||
self.api.time = time_service
|
self.api.time = time_service
|
||||||
self._price_data_manager.time = time_service
|
self._data_fetcher.time = time_service
|
||||||
self._data_transformer.time = time_service
|
self._data_transformer.time = time_service
|
||||||
self._period_calculator.time = time_service
|
self._period_calculator.time = time_service
|
||||||
|
|
||||||
|
|
@ -573,13 +440,18 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
||||||
current_date,
|
current_date,
|
||||||
)
|
)
|
||||||
|
|
||||||
# With flat interval list architecture and IntervalPool as source of truth,
|
# With flat interval list architecture, no rotation needed!
|
||||||
# no data rotation needed! get_intervals_for_day_offsets() automatically
|
# get_intervals_for_day_offsets() automatically filters by date.
|
||||||
# filters by date. Just re-transform to refresh enrichment.
|
# Just update coordinator's data to trigger entity updates.
|
||||||
if self.data and "priceInfo" in self.data:
|
if self.data and self._cached_price_data:
|
||||||
# Re-transform data to ensure enrichment is refreshed for new day
|
# Re-transform data to ensure enrichment is refreshed
|
||||||
raw_data = {"price_info": self.data["priceInfo"]}
|
self.data = self._transform_data(self._cached_price_data)
|
||||||
self.data = self._transform_data(raw_data)
|
|
||||||
|
# CRITICAL: Update _last_price_update to current time after midnight
|
||||||
|
# This prevents cache_validity from showing "date_mismatch" after midnight
|
||||||
|
# The data is still valid (just rotated today→yesterday, tomorrow→today)
|
||||||
|
# Update timestamp to reflect that the data is current for the new day
|
||||||
|
self._last_price_update = now
|
||||||
|
|
||||||
# Mark turnover as done for today (atomic update)
|
# Mark turnover as done for today (atomic update)
|
||||||
self._midnight_handler.mark_turnover_done(now)
|
self._midnight_handler.mark_turnover_done(now)
|
||||||
|
|
@ -632,14 +504,11 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
||||||
- Timer #2: Quarter-hour entity updates
|
- Timer #2: Quarter-hour entity updates
|
||||||
- Timer #3: Minute timing sensor updates
|
- Timer #3: Minute timing sensor updates
|
||||||
|
|
||||||
Also saves cache to persist any unsaved changes and clears all repairs.
|
Also saves cache to persist any unsaved changes.
|
||||||
"""
|
"""
|
||||||
# Cancel all timers first
|
# Cancel all timers first
|
||||||
self._listener_manager.cancel_timers()
|
self._listener_manager.cancel_timers()
|
||||||
|
|
||||||
# Clear all repairs when integration is removed or disabled
|
|
||||||
await self._repair_manager.clear_all_repairs()
|
|
||||||
|
|
||||||
# Save cache to persist any unsaved data
|
# Save cache to persist any unsaved data
|
||||||
# This ensures we don't lose data if HA is shutting down
|
# This ensures we don't lose data if HA is shutting down
|
||||||
try:
|
try:
|
||||||
|
|
@ -666,21 +535,19 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
||||||
|
|
||||||
# Transition lifecycle state from "fresh" to "cached" if enough time passed
|
# Transition lifecycle state from "fresh" to "cached" if enough time passed
|
||||||
# (5 minutes threshold defined in lifecycle calculator)
|
# (5 minutes threshold defined in lifecycle calculator)
|
||||||
# Note: This updates _lifecycle_state for diagnostics only.
|
if self._lifecycle_state == "fresh" and self._last_price_update:
|
||||||
# The lifecycle sensor calculates its state dynamically in get_lifecycle_state(),
|
age = current_time - self._last_price_update
|
||||||
# checking _last_price_update timestamp directly.
|
if age.total_seconds() > FRESH_TO_CACHED_SECONDS:
|
||||||
if self._lifecycle_state == "fresh":
|
self._lifecycle_state = "cached"
|
||||||
# After 5 minutes, data is considered "cached" (no longer "just fetched")
|
|
||||||
self._lifecycle_state = "cached"
|
|
||||||
|
|
||||||
# Update helper modules with fresh TimeService instance
|
# Update helper modules with fresh TimeService instance
|
||||||
self.api.time = self.time
|
self.api.time = self.time
|
||||||
self._price_data_manager.time = self.time
|
self._data_fetcher.time = self.time
|
||||||
self._data_transformer.time = self.time
|
self._data_transformer.time = self.time
|
||||||
self._period_calculator.time = self.time
|
self._period_calculator.time = self.time
|
||||||
|
|
||||||
# Load cache if not already loaded (user data only, price data is in Pool)
|
# Load cache if not already loaded
|
||||||
if self._cached_user_data is None:
|
if self._cached_price_data is None and self._cached_user_data is None:
|
||||||
await self.load_cache()
|
await self.load_cache()
|
||||||
|
|
||||||
# Initialize midnight handler on first run
|
# Initialize midnight handler on first run
|
||||||
|
|
@ -717,44 +584,47 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
||||||
self._api_calls_today = 0
|
self._api_calls_today = 0
|
||||||
self._last_api_call_date = current_date
|
self._last_api_call_date = current_date
|
||||||
|
|
||||||
# Set _is_fetching flag - lifecycle sensor shows "refreshing" during fetch
|
# Track last_price_update timestamp before fetch to detect if data actually changed
|
||||||
# Note: Lifecycle sensor reads this flag directly in get_lifecycle_state()
|
old_price_update = self._last_price_update
|
||||||
self._is_fetching = True
|
|
||||||
|
|
||||||
# Get current price info to check if tomorrow data already exists
|
# CRITICAL: Check if we need to fetch data BEFORE starting the fetch
|
||||||
current_price_info = self.data.get("priceInfo", []) if self.data else []
|
# This allows the lifecycle sensor to show "searching_tomorrow" status
|
||||||
|
# when we're actively looking for tomorrow's data after 13:00
|
||||||
|
should_update = self._data_fetcher.should_update_price_data(current_time)
|
||||||
|
|
||||||
result, api_called = await self._price_data_manager.handle_main_entry_update(
|
# Set _is_fetching flag if we're about to fetch data
|
||||||
|
# This makes the lifecycle sensor show "refreshing" status during the API call
|
||||||
|
if should_update:
|
||||||
|
self._is_fetching = True
|
||||||
|
# Immediately notify lifecycle sensor about state change
|
||||||
|
# This ensures "refreshing" or "searching_tomorrow" appears DURING the fetch
|
||||||
|
self.async_update_listeners()
|
||||||
|
|
||||||
|
result = await self._data_fetcher.handle_main_entry_update(
|
||||||
current_time,
|
current_time,
|
||||||
self._home_id,
|
self._home_id,
|
||||||
self._transform_data,
|
self._transform_data,
|
||||||
current_price_info=current_price_info,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
# CRITICAL: Reset fetching flag AFTER data fetch completes
|
# CRITICAL: Reset fetching flag AFTER data fetch completes
|
||||||
self._is_fetching = False
|
self._is_fetching = False
|
||||||
|
|
||||||
# Sync user_data cache (price data is in IntervalPool)
|
# CRITICAL: Sync cached data after API call
|
||||||
self._cached_user_data = self._price_data_manager.cached_user_data
|
# handle_main_entry_update() updates data_fetcher's cache, we need to sync:
|
||||||
|
# 1. cached_user_data (for new integrations, may be fetched via update_user_data_if_needed())
|
||||||
|
# 2. cached_price_data (CRITICAL: contains tomorrow data, needed for _needs_tomorrow_data())
|
||||||
|
# 3. _last_price_update (for lifecycle tracking: cache age, fresh state detection)
|
||||||
|
self._cached_user_data = self._data_fetcher.cached_user_data
|
||||||
|
self._cached_price_data = self._data_fetcher.cached_price_data
|
||||||
|
self._last_price_update = self._data_fetcher._last_price_update # noqa: SLF001 - Sync for lifecycle tracking
|
||||||
|
|
||||||
# Update lifecycle tracking - ONLY if API was actually called
|
# Update lifecycle tracking only if we fetched NEW data (timestamp changed)
|
||||||
# (not when returning cached data)
|
# This prevents recorder spam from state changes when returning cached data
|
||||||
if api_called and result and "priceInfo" in result and len(result["priceInfo"]) > 0:
|
if self._last_price_update != old_price_update:
|
||||||
self._last_price_update = current_time # Track when data was fetched from API
|
|
||||||
self._api_calls_today += 1
|
self._api_calls_today += 1
|
||||||
self._lifecycle_state = "fresh" # Data just fetched
|
self._lifecycle_state = "fresh" # Data just fetched
|
||||||
_LOGGER.debug(
|
# No separate lifecycle notification needed - normal async_update_listeners()
|
||||||
"API call completed: Fetched %d intervals, updating lifecycle to 'fresh'",
|
# will trigger all entities (including lifecycle sensor) after this return
|
||||||
len(result["priceInfo"]),
|
|
||||||
)
|
|
||||||
# Note: _lifecycle_state is for diagnostics only.
|
|
||||||
# Lifecycle sensor calculates state dynamically from _last_price_update.
|
|
||||||
elif not api_called:
|
|
||||||
# Using cached data - lifecycle stays as is (cached/searching_tomorrow/etc.)
|
|
||||||
_LOGGER.debug(
|
|
||||||
"Using cached data: %d intervals from pool, no API call made",
|
|
||||||
len(result.get("priceInfo", [])),
|
|
||||||
)
|
|
||||||
except (
|
except (
|
||||||
TibberPricesApiClientAuthenticationError,
|
TibberPricesApiClientAuthenticationError,
|
||||||
TibberPricesApiClientCommunicationError,
|
TibberPricesApiClientCommunicationError,
|
||||||
|
|
@ -762,80 +632,44 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
||||||
) as err:
|
) as err:
|
||||||
# Reset lifecycle state on error
|
# Reset lifecycle state on error
|
||||||
self._is_fetching = False
|
self._is_fetching = False
|
||||||
self._lifecycle_state = "error" # For diagnostics
|
self._lifecycle_state = "error"
|
||||||
# Note: Lifecycle sensor detects errors via coordinator.last_exception
|
# No separate lifecycle notification needed - error case returns data
|
||||||
|
# which triggers normal async_update_listeners()
|
||||||
# Track rate limit errors for repair system
|
return await self._data_fetcher.handle_api_error(
|
||||||
await self._track_rate_limit_error(err)
|
err,
|
||||||
|
self._transform_data,
|
||||||
# Handle API error - will re-raise as ConfigEntryAuthFailed or UpdateFailed
|
)
|
||||||
# Note: With IntervalPool, there's no local cache fallback here.
|
|
||||||
# The Pool has its own persistence for offline recovery.
|
|
||||||
await self._price_data_manager.handle_api_error(err)
|
|
||||||
# Note: handle_api_error always raises, this is never reached
|
|
||||||
return {} # Satisfy type checker
|
|
||||||
else:
|
else:
|
||||||
# Check for repair conditions after successful update
|
|
||||||
await self._check_repair_conditions(result, current_time)
|
|
||||||
return result
|
return result
|
||||||
|
|
||||||
async def _track_rate_limit_error(self, error: Exception) -> None:
|
|
||||||
"""Track rate limit errors for repair notification system."""
|
|
||||||
error_str = str(error).lower()
|
|
||||||
is_rate_limit = "429" in error_str or "rate limit" in error_str or "too many requests" in error_str
|
|
||||||
if is_rate_limit:
|
|
||||||
await self._repair_manager.track_rate_limit_error()
|
|
||||||
|
|
||||||
async def _check_repair_conditions(
|
|
||||||
self,
|
|
||||||
result: dict[str, Any],
|
|
||||||
current_time: datetime,
|
|
||||||
) -> None:
|
|
||||||
"""Check and manage repair conditions after successful data update."""
|
|
||||||
# 1. Home not found detection (home was removed from Tibber account)
|
|
||||||
if result and result.get("_home_not_found"):
|
|
||||||
await self._repair_manager.create_home_not_found_repair()
|
|
||||||
# Remove the marker before returning to entities
|
|
||||||
result.pop("_home_not_found", None)
|
|
||||||
else:
|
|
||||||
# Home exists - clear any existing repair
|
|
||||||
await self._repair_manager.clear_home_not_found_repair()
|
|
||||||
|
|
||||||
# 2. Tomorrow data availability (after 18:00)
|
|
||||||
if result and "priceInfo" in result:
|
|
||||||
has_tomorrow_data = self._price_data_manager.has_tomorrow_data(result["priceInfo"])
|
|
||||||
await self._repair_manager.check_tomorrow_data_availability(
|
|
||||||
has_tomorrow_data=has_tomorrow_data,
|
|
||||||
current_time=current_time,
|
|
||||||
)
|
|
||||||
|
|
||||||
# 3. Clear rate limit tracking on successful API call
|
|
||||||
await self._repair_manager.clear_rate_limit_tracking()
|
|
||||||
|
|
||||||
async def load_cache(self) -> None:
|
async def load_cache(self) -> None:
|
||||||
"""Load cached user data from storage (price data is in IntervalPool)."""
|
"""Load cached data from storage."""
|
||||||
await self._price_data_manager.load_cache()
|
await self._data_fetcher.load_cache()
|
||||||
# Sync user data reference
|
# Sync legacy references
|
||||||
self._cached_user_data = self._price_data_manager.cached_user_data
|
self._cached_price_data = self._data_fetcher.cached_price_data
|
||||||
self._last_user_update = self._price_data_manager._last_user_update # noqa: SLF001 - Sync for lifecycle tracking
|
self._cached_user_data = self._data_fetcher.cached_user_data
|
||||||
|
self._last_price_update = self._data_fetcher._last_price_update # noqa: SLF001 - Sync for lifecycle tracking
|
||||||
|
self._last_user_update = self._data_fetcher._last_user_update # noqa: SLF001 - Sync for lifecycle tracking
|
||||||
|
|
||||||
# Note: Midnight handler state is now based on current date
|
# CRITICAL: Restore midnight handler state from cache
|
||||||
# Since price data is in IntervalPool (persistent), we just need to
|
# If cache is from today, assume turnover already happened at midnight
|
||||||
# ensure turnover doesn't happen twice if HA restarts after midnight
|
# This allows proper turnover detection after HA restart
|
||||||
today_midnight = self.time.as_local(self.time.now()).replace(hour=0, minute=0, second=0, microsecond=0)
|
if self._last_price_update:
|
||||||
# Mark today's midnight as done to prevent double turnover on HA restart
|
cache_date = self.time.as_local(self._last_price_update).date()
|
||||||
self._midnight_handler.mark_turnover_done(today_midnight)
|
today_date = self.time.as_local(self.time.now()).date()
|
||||||
|
if cache_date == today_date:
|
||||||
|
# Cache is from today, so midnight turnover already happened
|
||||||
|
today_midnight = self.time.as_local(self.time.now()).replace(hour=0, minute=0, second=0, microsecond=0)
|
||||||
|
# Restore handler state: mark today's midnight as last turnover
|
||||||
|
self._midnight_handler.mark_turnover_done(today_midnight)
|
||||||
|
|
||||||
async def _store_cache(self) -> None:
|
async def _store_cache(self) -> None:
|
||||||
"""Store cache data (user metadata only, price data is in IntervalPool)."""
|
"""Store cache data."""
|
||||||
await self._price_data_manager.store_cache(self._midnight_handler.last_check_time)
|
await self._data_fetcher.store_cache(self._midnight_handler.last_check_time)
|
||||||
|
|
||||||
def _needs_tomorrow_data(self) -> bool:
|
def _needs_tomorrow_data(self) -> bool:
|
||||||
"""Check if tomorrow data is missing or invalid."""
|
"""Check if tomorrow data is missing or invalid."""
|
||||||
# Check self.data (from Pool) instead of _cached_price_data
|
return helpers.needs_tomorrow_data(self._cached_price_data)
|
||||||
if not self.data or "priceInfo" not in self.data:
|
|
||||||
return True
|
|
||||||
return helpers.needs_tomorrow_data({"price_info": self.data["priceInfo"]})
|
|
||||||
|
|
||||||
def _has_valid_tomorrow_data(self) -> bool:
|
def _has_valid_tomorrow_data(self) -> bool:
|
||||||
"""Check if we have valid tomorrow data (inverse of _needs_tomorrow_data)."""
|
"""Check if we have valid tomorrow data (inverse of _needs_tomorrow_data)."""
|
||||||
|
|
@ -843,12 +677,12 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
||||||
|
|
||||||
@callback
|
@callback
|
||||||
def _merge_cached_data(self) -> dict[str, Any]:
|
def _merge_cached_data(self) -> dict[str, Any]:
|
||||||
"""Return current data (from Pool)."""
|
"""Merge cached data into the expected format for main entry."""
|
||||||
if not self.data:
|
if not self._cached_price_data:
|
||||||
return {}
|
return {}
|
||||||
return self.data
|
return self._transform_data(self._cached_price_data)
|
||||||
|
|
||||||
def _get_threshold_percentages(self) -> dict[str, int | float]:
|
def _get_threshold_percentages(self) -> dict[str, int]:
|
||||||
"""Get threshold percentages from config options."""
|
"""Get threshold percentages from config options."""
|
||||||
return self._data_transformer.get_threshold_percentages()
|
return self._data_transformer.get_threshold_percentages()
|
||||||
|
|
||||||
|
|
|
||||||
331
custom_components/tibber_prices/coordinator/data_fetching.py
Normal file
331
custom_components/tibber_prices/coordinator/data_fetching.py
Normal file
|
|
@ -0,0 +1,331 @@
|
||||||
|
"""Data fetching logic for the coordinator."""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
import logging
|
||||||
|
import secrets
|
||||||
|
from typing import TYPE_CHECKING, Any
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from datetime import timedelta
|
||||||
|
|
||||||
|
from custom_components.tibber_prices.api import (
|
||||||
|
TibberPricesApiClientAuthenticationError,
|
||||||
|
TibberPricesApiClientCommunicationError,
|
||||||
|
TibberPricesApiClientError,
|
||||||
|
)
|
||||||
|
from homeassistant.core import callback
|
||||||
|
from homeassistant.exceptions import ConfigEntryAuthFailed
|
||||||
|
from homeassistant.helpers.update_coordinator import UpdateFailed
|
||||||
|
|
||||||
|
from . import cache, helpers
|
||||||
|
from .constants import TOMORROW_DATA_CHECK_HOUR, TOMORROW_DATA_RANDOM_DELAY_MAX
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from collections.abc import Callable
|
||||||
|
from datetime import datetime
|
||||||
|
|
||||||
|
from custom_components.tibber_prices.api import TibberPricesApiClient
|
||||||
|
|
||||||
|
from .time_service import TibberPricesTimeService
|
||||||
|
|
||||||
|
_LOGGER = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class TibberPricesDataFetcher:
|
||||||
|
"""Handles data fetching, caching, and main/subentry coordination."""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
api: TibberPricesApiClient,
|
||||||
|
store: Any,
|
||||||
|
log_prefix: str,
|
||||||
|
user_update_interval: timedelta,
|
||||||
|
time: TibberPricesTimeService,
|
||||||
|
) -> None:
|
||||||
|
"""Initialize the data fetcher."""
|
||||||
|
self.api = api
|
||||||
|
self._store = store
|
||||||
|
self._log_prefix = log_prefix
|
||||||
|
self._user_update_interval = user_update_interval
|
||||||
|
self.time: TibberPricesTimeService = time
|
||||||
|
|
||||||
|
# Cached data
|
||||||
|
self._cached_price_data: dict[str, Any] | None = None
|
||||||
|
self._cached_user_data: dict[str, Any] | None = None
|
||||||
|
self._last_price_update: datetime | None = None
|
||||||
|
self._last_user_update: datetime | None = None
|
||||||
|
|
||||||
|
def _log(self, level: str, message: str, *args: object, **kwargs: object) -> None:
|
||||||
|
"""Log with coordinator-specific prefix."""
|
||||||
|
prefixed_message = f"{self._log_prefix} {message}"
|
||||||
|
getattr(_LOGGER, level)(prefixed_message, *args, **kwargs)
|
||||||
|
|
||||||
|
async def load_cache(self) -> None:
|
||||||
|
"""Load cached data from storage."""
|
||||||
|
cache_data = await cache.load_cache(self._store, self._log_prefix, time=self.time)
|
||||||
|
|
||||||
|
self._cached_price_data = cache_data.price_data
|
||||||
|
self._cached_user_data = cache_data.user_data
|
||||||
|
self._last_price_update = cache_data.last_price_update
|
||||||
|
self._last_user_update = cache_data.last_user_update
|
||||||
|
|
||||||
|
# Parse timestamps if we loaded price data from cache
|
||||||
|
if self._cached_price_data:
|
||||||
|
self._cached_price_data = helpers.parse_all_timestamps(self._cached_price_data, time=self.time)
|
||||||
|
|
||||||
|
# Validate cache: check if price data is from a previous day
|
||||||
|
if not cache.is_cache_valid(cache_data, self._log_prefix, time=self.time):
|
||||||
|
self._log("info", "Cached price data is from a previous day, clearing cache to fetch fresh data")
|
||||||
|
self._cached_price_data = None
|
||||||
|
self._last_price_update = None
|
||||||
|
await self.store_cache()
|
||||||
|
|
||||||
|
async def store_cache(self, last_midnight_check: datetime | None = None) -> None:
|
||||||
|
"""Store cache data."""
|
||||||
|
cache_data = cache.TibberPricesCacheData(
|
||||||
|
price_data=self._cached_price_data,
|
||||||
|
user_data=self._cached_user_data,
|
||||||
|
last_price_update=self._last_price_update,
|
||||||
|
last_user_update=self._last_user_update,
|
||||||
|
last_midnight_check=last_midnight_check,
|
||||||
|
)
|
||||||
|
await cache.save_cache(self._store, cache_data, self._log_prefix)
|
||||||
|
|
||||||
|
async def update_user_data_if_needed(self, current_time: datetime) -> bool:
|
||||||
|
"""
|
||||||
|
Update user data if needed (daily check).
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if user data was updated, False otherwise
|
||||||
|
|
||||||
|
"""
|
||||||
|
if self._last_user_update is None or current_time - self._last_user_update >= self._user_update_interval:
|
||||||
|
try:
|
||||||
|
self._log("debug", "Updating user data")
|
||||||
|
user_data = await self.api.async_get_viewer_details()
|
||||||
|
self._cached_user_data = user_data
|
||||||
|
self._last_user_update = current_time
|
||||||
|
self._log("debug", "User data updated successfully")
|
||||||
|
except (
|
||||||
|
TibberPricesApiClientError,
|
||||||
|
TibberPricesApiClientCommunicationError,
|
||||||
|
) as ex:
|
||||||
|
self._log("warning", "Failed to update user data: %s", ex)
|
||||||
|
return False # Update failed
|
||||||
|
else:
|
||||||
|
return True # User data was updated
|
||||||
|
return False # No update needed
|
||||||
|
|
||||||
|
@callback
|
||||||
|
def should_update_price_data(self, current_time: datetime) -> bool | str:
|
||||||
|
"""
|
||||||
|
Check if price data should be updated from the API.
|
||||||
|
|
||||||
|
API calls only happen when truly needed:
|
||||||
|
1. No cached data exists
|
||||||
|
2. Cache is invalid (from previous day - detected by _is_cache_valid)
|
||||||
|
3. After 13:00 local time and tomorrow's data is missing or invalid
|
||||||
|
|
||||||
|
Cache validity is ensured by:
|
||||||
|
- _is_cache_valid() checks date mismatch on load
|
||||||
|
- Midnight turnover clears cache (Timer #2)
|
||||||
|
- Tomorrow data validation after 13:00
|
||||||
|
|
||||||
|
No periodic "safety" updates - trust the cache validation!
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool or str: True for immediate update, "tomorrow_check" for tomorrow
|
||||||
|
data check (needs random delay), False for no update
|
||||||
|
|
||||||
|
"""
|
||||||
|
if self._cached_price_data is None:
|
||||||
|
self._log("debug", "API update needed: No cached price data")
|
||||||
|
return True
|
||||||
|
if self._last_price_update is None:
|
||||||
|
self._log("debug", "API update needed: No last price update timestamp")
|
||||||
|
return True
|
||||||
|
|
||||||
|
# Check if after 13:00 and tomorrow data is missing or invalid
|
||||||
|
now_local = self.time.as_local(current_time)
|
||||||
|
if now_local.hour >= TOMORROW_DATA_CHECK_HOUR and self._cached_price_data and self.needs_tomorrow_data():
|
||||||
|
self._log(
|
||||||
|
"info",
|
||||||
|
"API update needed: After %s:00 and tomorrow's data missing/invalid",
|
||||||
|
TOMORROW_DATA_CHECK_HOUR,
|
||||||
|
)
|
||||||
|
# Return special marker to indicate this is a tomorrow data check
|
||||||
|
# Caller should add random delay to spread load
|
||||||
|
return "tomorrow_check"
|
||||||
|
|
||||||
|
# No update needed - cache is valid and complete
|
||||||
|
self._log("debug", "No API update needed: Cache is valid and complete")
|
||||||
|
return False
|
||||||
|
|
||||||
|
def needs_tomorrow_data(self) -> bool:
|
||||||
|
"""Check if tomorrow data is missing or invalid."""
|
||||||
|
return helpers.needs_tomorrow_data(self._cached_price_data)
|
||||||
|
|
||||||
|
async def fetch_home_data(self, home_id: str, current_time: datetime) -> dict[str, Any]:
|
||||||
|
"""Fetch data for a single home."""
|
||||||
|
if not home_id:
|
||||||
|
self._log("warning", "No home ID provided - cannot fetch price data")
|
||||||
|
return {
|
||||||
|
"timestamp": current_time,
|
||||||
|
"home_id": "",
|
||||||
|
"price_info": [],
|
||||||
|
"currency": "EUR",
|
||||||
|
}
|
||||||
|
|
||||||
|
# Ensure we have user_data before fetching price data
|
||||||
|
# This is critical for timezone-aware cursor calculation
|
||||||
|
if not self._cached_user_data:
|
||||||
|
self._log("info", "User data not cached, fetching before price data")
|
||||||
|
try:
|
||||||
|
user_data = await self.api.async_get_viewer_details()
|
||||||
|
self._cached_user_data = user_data
|
||||||
|
self._last_user_update = current_time
|
||||||
|
except (
|
||||||
|
TibberPricesApiClientError,
|
||||||
|
TibberPricesApiClientCommunicationError,
|
||||||
|
) as ex:
|
||||||
|
msg = f"Failed to fetch user data (required for price fetching): {ex}"
|
||||||
|
self._log("error", msg)
|
||||||
|
raise TibberPricesApiClientError(msg) from ex
|
||||||
|
|
||||||
|
# Get price data for this home
|
||||||
|
# Pass user_data for timezone-aware cursor calculation
|
||||||
|
# At this point, _cached_user_data is guaranteed to be not None (checked above)
|
||||||
|
if not self._cached_user_data:
|
||||||
|
msg = "User data unexpectedly None after fetch attempt"
|
||||||
|
raise TibberPricesApiClientError(msg)
|
||||||
|
|
||||||
|
self._log("debug", "Fetching price data for home %s", home_id)
|
||||||
|
home_data = await self.api.async_get_price_info(
|
||||||
|
home_id=home_id,
|
||||||
|
user_data=self._cached_user_data,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Extract currency for this home from user_data
|
||||||
|
currency = self._get_currency_for_home(home_id)
|
||||||
|
|
||||||
|
price_info = home_data.get("price_info", [])
|
||||||
|
|
||||||
|
self._log("debug", "Successfully fetched data for home %s (%d intervals)", home_id, len(price_info))
|
||||||
|
|
||||||
|
return {
|
||||||
|
"timestamp": current_time,
|
||||||
|
"home_id": home_id,
|
||||||
|
"price_info": price_info,
|
||||||
|
"currency": currency,
|
||||||
|
}
|
||||||
|
|
||||||
|
def _get_currency_for_home(self, home_id: str) -> str:
|
||||||
|
"""Get currency for a specific home from cached user_data."""
|
||||||
|
if not self._cached_user_data:
|
||||||
|
self._log("warning", "No user data cached, using EUR as default currency")
|
||||||
|
return "EUR"
|
||||||
|
|
||||||
|
viewer = self._cached_user_data.get("viewer", {})
|
||||||
|
homes = viewer.get("homes", [])
|
||||||
|
|
||||||
|
for home in homes:
|
||||||
|
if home.get("id") == home_id:
|
||||||
|
# Extract currency from nested structure (with fallback to EUR)
|
||||||
|
currency = (
|
||||||
|
home.get("currentSubscription", {}).get("priceInfo", {}).get("current", {}).get("currency", "EUR")
|
||||||
|
)
|
||||||
|
self._log("debug", "Extracted currency %s for home %s", currency, home_id)
|
||||||
|
return currency
|
||||||
|
|
||||||
|
self._log("warning", "Home %s not found in user data, using EUR as default", home_id)
|
||||||
|
return "EUR"
|
||||||
|
|
||||||
|
async def handle_main_entry_update(
|
||||||
|
self,
|
||||||
|
current_time: datetime,
|
||||||
|
home_id: str,
|
||||||
|
transform_fn: Callable[[dict[str, Any]], dict[str, Any]],
|
||||||
|
) -> dict[str, Any]:
|
||||||
|
"""Handle update for main entry - fetch data for this home."""
|
||||||
|
# Update user data if needed (daily check)
|
||||||
|
user_data_updated = await self.update_user_data_if_needed(current_time)
|
||||||
|
|
||||||
|
# Check if we need to update price data
|
||||||
|
should_update = self.should_update_price_data(current_time)
|
||||||
|
|
||||||
|
if should_update:
|
||||||
|
# If this is a tomorrow data check, add random delay to spread API load
|
||||||
|
if should_update == "tomorrow_check":
|
||||||
|
# Use secrets for better randomness distribution
|
||||||
|
delay = secrets.randbelow(TOMORROW_DATA_RANDOM_DELAY_MAX + 1)
|
||||||
|
self._log(
|
||||||
|
"debug",
|
||||||
|
"Tomorrow data check - adding random delay of %d seconds to spread load",
|
||||||
|
delay,
|
||||||
|
)
|
||||||
|
await asyncio.sleep(delay)
|
||||||
|
|
||||||
|
self._log("debug", "Fetching fresh price data from API")
|
||||||
|
raw_data = await self.fetch_home_data(home_id, current_time)
|
||||||
|
# Parse timestamps immediately after API fetch
|
||||||
|
raw_data = helpers.parse_all_timestamps(raw_data, time=self.time)
|
||||||
|
# Cache the data (now with datetime objects)
|
||||||
|
self._cached_price_data = raw_data
|
||||||
|
self._last_price_update = current_time
|
||||||
|
await self.store_cache()
|
||||||
|
# Transform for main entry
|
||||||
|
return transform_fn(raw_data)
|
||||||
|
|
||||||
|
# Use cached data if available
|
||||||
|
if self._cached_price_data is not None:
|
||||||
|
# If user data was updated, we need to return transformed data to trigger entity updates
|
||||||
|
# This ensures diagnostic sensors (home_type, grid_company, etc.) get refreshed
|
||||||
|
if user_data_updated:
|
||||||
|
self._log("debug", "User data updated - returning transformed data to update diagnostic sensors")
|
||||||
|
else:
|
||||||
|
self._log("debug", "Using cached price data (no API call needed)")
|
||||||
|
return transform_fn(self._cached_price_data)
|
||||||
|
|
||||||
|
# Fallback: no cache and no update needed (shouldn't happen)
|
||||||
|
self._log("warning", "No cached data available and update not triggered - returning empty data")
|
||||||
|
return {
|
||||||
|
"timestamp": current_time,
|
||||||
|
"home_id": home_id,
|
||||||
|
"priceInfo": [],
|
||||||
|
"currency": "",
|
||||||
|
}
|
||||||
|
|
||||||
|
async def handle_api_error(
|
||||||
|
self,
|
||||||
|
error: Exception,
|
||||||
|
transform_fn: Callable[[dict[str, Any]], dict[str, Any]],
|
||||||
|
) -> dict[str, Any]:
|
||||||
|
"""Handle API errors with fallback to cached data."""
|
||||||
|
if isinstance(error, TibberPricesApiClientAuthenticationError):
|
||||||
|
msg = "Invalid access token"
|
||||||
|
raise ConfigEntryAuthFailed(msg) from error
|
||||||
|
|
||||||
|
# Use cached data as fallback if available
|
||||||
|
if self._cached_price_data is not None:
|
||||||
|
self._log("warning", "API error, using cached data: %s", error)
|
||||||
|
return transform_fn(self._cached_price_data)
|
||||||
|
|
||||||
|
msg = f"Error communicating with API: {error}"
|
||||||
|
raise UpdateFailed(msg) from error
|
||||||
|
|
||||||
|
@property
|
||||||
|
def cached_price_data(self) -> dict[str, Any] | None:
|
||||||
|
"""Get cached price data."""
|
||||||
|
return self._cached_price_data
|
||||||
|
|
||||||
|
@cached_price_data.setter
|
||||||
|
def cached_price_data(self, value: dict[str, Any] | None) -> None:
|
||||||
|
"""Set cached price data."""
|
||||||
|
self._cached_price_data = value
|
||||||
|
|
||||||
|
@property
|
||||||
|
def cached_user_data(self) -> dict[str, Any] | None:
|
||||||
|
"""Get cached user data."""
|
||||||
|
return self._cached_user_data
|
||||||
|
|
@ -2,7 +2,6 @@
|
||||||
|
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
import copy
|
|
||||||
import logging
|
import logging
|
||||||
from typing import TYPE_CHECKING, Any
|
from typing import TYPE_CHECKING, Any
|
||||||
|
|
||||||
|
|
@ -49,50 +48,19 @@ class TibberPricesDataTransformer:
|
||||||
prefixed_message = f"{self._log_prefix} {message}"
|
prefixed_message = f"{self._log_prefix} {message}"
|
||||||
getattr(_LOGGER, level)(prefixed_message, *args, **kwargs)
|
getattr(_LOGGER, level)(prefixed_message, *args, **kwargs)
|
||||||
|
|
||||||
def get_threshold_percentages(self) -> dict[str, int | float]:
|
def get_threshold_percentages(self) -> dict[str, int]:
|
||||||
"""
|
"""Get threshold percentages from config options."""
|
||||||
Get threshold percentages, hysteresis and gap tolerance for RATING_LEVEL from config options.
|
|
||||||
|
|
||||||
CRITICAL: This function is ONLY for rating_level (internal calculation: LOW/NORMAL/HIGH).
|
|
||||||
Do NOT use for price level (Tibber API: VERY_CHEAP/CHEAP/NORMAL/EXPENSIVE/VERY_EXPENSIVE).
|
|
||||||
"""
|
|
||||||
options = self.config_entry.options or {}
|
options = self.config_entry.options or {}
|
||||||
return {
|
return {
|
||||||
"low": options.get(_const.CONF_PRICE_RATING_THRESHOLD_LOW, _const.DEFAULT_PRICE_RATING_THRESHOLD_LOW),
|
"low": options.get(_const.CONF_PRICE_RATING_THRESHOLD_LOW, _const.DEFAULT_PRICE_RATING_THRESHOLD_LOW),
|
||||||
"high": options.get(_const.CONF_PRICE_RATING_THRESHOLD_HIGH, _const.DEFAULT_PRICE_RATING_THRESHOLD_HIGH),
|
"high": options.get(_const.CONF_PRICE_RATING_THRESHOLD_HIGH, _const.DEFAULT_PRICE_RATING_THRESHOLD_HIGH),
|
||||||
"hysteresis": options.get(_const.CONF_PRICE_RATING_HYSTERESIS, _const.DEFAULT_PRICE_RATING_HYSTERESIS),
|
|
||||||
"gap_tolerance": options.get(
|
|
||||||
_const.CONF_PRICE_RATING_GAP_TOLERANCE, _const.DEFAULT_PRICE_RATING_GAP_TOLERANCE
|
|
||||||
),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
def get_level_gap_tolerance(self) -> int:
|
|
||||||
"""
|
|
||||||
Get gap tolerance for PRICE LEVEL (Tibber API) from config options.
|
|
||||||
|
|
||||||
CRITICAL: This is separate from rating_level gap tolerance.
|
|
||||||
Price level comes from Tibber API (VERY_CHEAP/CHEAP/NORMAL/EXPENSIVE/VERY_EXPENSIVE).
|
|
||||||
Rating level is calculated internally (LOW/NORMAL/HIGH).
|
|
||||||
"""
|
|
||||||
options = self.config_entry.options or {}
|
|
||||||
return options.get(_const.CONF_PRICE_LEVEL_GAP_TOLERANCE, _const.DEFAULT_PRICE_LEVEL_GAP_TOLERANCE)
|
|
||||||
|
|
||||||
def invalidate_config_cache(self) -> None:
|
def invalidate_config_cache(self) -> None:
|
||||||
"""
|
"""Invalidate config cache when options change."""
|
||||||
Invalidate config cache AND transformation cache when options change.
|
|
||||||
|
|
||||||
CRITICAL: When options like gap_tolerance, hysteresis, or price_level_gap_tolerance
|
|
||||||
change, we must clear BOTH caches:
|
|
||||||
1. Config cache (_config_cache) - forces config rebuild on next check
|
|
||||||
2. Transformation cache (_cached_transformed_data) - forces data re-enrichment
|
|
||||||
|
|
||||||
This ensures that the next call to transform_data() will re-calculate
|
|
||||||
rating_levels and apply new gap tolerance settings to existing price data.
|
|
||||||
"""
|
|
||||||
self._config_cache_valid = False
|
self._config_cache_valid = False
|
||||||
self._config_cache = None
|
self._config_cache = None
|
||||||
self._cached_transformed_data = None # Force re-transformation with new config
|
self._log("debug", "Config cache invalidated")
|
||||||
self._last_transformation_config = None # Force config comparison to trigger
|
|
||||||
|
|
||||||
def _get_current_transformation_config(self) -> dict[str, Any]:
|
def _get_current_transformation_config(self) -> dict[str, Any]:
|
||||||
"""
|
"""
|
||||||
|
|
@ -105,53 +73,36 @@ class TibberPricesDataTransformer:
|
||||||
return self._config_cache
|
return self._config_cache
|
||||||
|
|
||||||
# Build config dictionary (expensive operation)
|
# Build config dictionary (expensive operation)
|
||||||
options = self.config_entry.options
|
|
||||||
|
|
||||||
# Best/peak price remain nested (multi-section steps)
|
|
||||||
best_period_section = options.get("period_settings", {})
|
|
||||||
best_flex_section = options.get("flexibility_settings", {})
|
|
||||||
best_relax_section = options.get("relaxation_and_target_periods", {})
|
|
||||||
peak_period_section = options.get("period_settings", {})
|
|
||||||
peak_flex_section = options.get("flexibility_settings", {})
|
|
||||||
peak_relax_section = options.get("relaxation_and_target_periods", {})
|
|
||||||
|
|
||||||
config = {
|
config = {
|
||||||
"thresholds": self.get_threshold_percentages(),
|
"thresholds": self.get_threshold_percentages(),
|
||||||
"level_gap_tolerance": self.get_level_gap_tolerance(), # Separate: Tibber's price level smoothing
|
|
||||||
# Volatility thresholds now flat (single-section step)
|
|
||||||
"volatility_thresholds": {
|
"volatility_thresholds": {
|
||||||
"moderate": options.get(_const.CONF_VOLATILITY_THRESHOLD_MODERATE, 15.0),
|
"moderate": self.config_entry.options.get(_const.CONF_VOLATILITY_THRESHOLD_MODERATE, 15.0),
|
||||||
"high": options.get(_const.CONF_VOLATILITY_THRESHOLD_HIGH, 25.0),
|
"high": self.config_entry.options.get(_const.CONF_VOLATILITY_THRESHOLD_HIGH, 25.0),
|
||||||
"very_high": options.get(_const.CONF_VOLATILITY_THRESHOLD_VERY_HIGH, 40.0),
|
"very_high": self.config_entry.options.get(_const.CONF_VOLATILITY_THRESHOLD_VERY_HIGH, 40.0),
|
||||||
},
|
|
||||||
# Price trend thresholds now flat (single-section step)
|
|
||||||
"price_trend_thresholds": {
|
|
||||||
"rising": options.get(
|
|
||||||
_const.CONF_PRICE_TREND_THRESHOLD_RISING, _const.DEFAULT_PRICE_TREND_THRESHOLD_RISING
|
|
||||||
),
|
|
||||||
"falling": options.get(
|
|
||||||
_const.CONF_PRICE_TREND_THRESHOLD_FALLING, _const.DEFAULT_PRICE_TREND_THRESHOLD_FALLING
|
|
||||||
),
|
|
||||||
},
|
},
|
||||||
"best_price_config": {
|
"best_price_config": {
|
||||||
"flex": best_flex_section.get(_const.CONF_BEST_PRICE_FLEX, 15.0),
|
"flex": self.config_entry.options.get(_const.CONF_BEST_PRICE_FLEX, 15.0),
|
||||||
"max_level": best_period_section.get(_const.CONF_BEST_PRICE_MAX_LEVEL, "NORMAL"),
|
"max_level": self.config_entry.options.get(_const.CONF_BEST_PRICE_MAX_LEVEL, "NORMAL"),
|
||||||
"min_period_length": best_period_section.get(_const.CONF_BEST_PRICE_MIN_PERIOD_LENGTH, 4),
|
"min_period_length": self.config_entry.options.get(_const.CONF_BEST_PRICE_MIN_PERIOD_LENGTH, 4),
|
||||||
"min_distance_from_avg": best_flex_section.get(_const.CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG, -5.0),
|
"min_distance_from_avg": self.config_entry.options.get(
|
||||||
"max_level_gap_count": best_period_section.get(_const.CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT, 0),
|
_const.CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG, -5.0
|
||||||
"enable_min_periods": best_relax_section.get(_const.CONF_ENABLE_MIN_PERIODS_BEST, False),
|
),
|
||||||
"min_periods": best_relax_section.get(_const.CONF_MIN_PERIODS_BEST, 2),
|
"max_level_gap_count": self.config_entry.options.get(_const.CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT, 0),
|
||||||
"relaxation_attempts": best_relax_section.get(_const.CONF_RELAXATION_ATTEMPTS_BEST, 4),
|
"enable_min_periods": self.config_entry.options.get(_const.CONF_ENABLE_MIN_PERIODS_BEST, False),
|
||||||
|
"min_periods": self.config_entry.options.get(_const.CONF_MIN_PERIODS_BEST, 2),
|
||||||
|
"relaxation_attempts": self.config_entry.options.get(_const.CONF_RELAXATION_ATTEMPTS_BEST, 4),
|
||||||
},
|
},
|
||||||
"peak_price_config": {
|
"peak_price_config": {
|
||||||
"flex": peak_flex_section.get(_const.CONF_PEAK_PRICE_FLEX, 15.0),
|
"flex": self.config_entry.options.get(_const.CONF_PEAK_PRICE_FLEX, 15.0),
|
||||||
"min_level": peak_period_section.get(_const.CONF_PEAK_PRICE_MIN_LEVEL, "HIGH"),
|
"min_level": self.config_entry.options.get(_const.CONF_PEAK_PRICE_MIN_LEVEL, "HIGH"),
|
||||||
"min_period_length": peak_period_section.get(_const.CONF_PEAK_PRICE_MIN_PERIOD_LENGTH, 4),
|
"min_period_length": self.config_entry.options.get(_const.CONF_PEAK_PRICE_MIN_PERIOD_LENGTH, 4),
|
||||||
"min_distance_from_avg": peak_flex_section.get(_const.CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG, 5.0),
|
"min_distance_from_avg": self.config_entry.options.get(
|
||||||
"max_level_gap_count": peak_period_section.get(_const.CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT, 0),
|
_const.CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG, 5.0
|
||||||
"enable_min_periods": peak_relax_section.get(_const.CONF_ENABLE_MIN_PERIODS_PEAK, False),
|
),
|
||||||
"min_periods": peak_relax_section.get(_const.CONF_MIN_PERIODS_PEAK, 2),
|
"max_level_gap_count": self.config_entry.options.get(_const.CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT, 0),
|
||||||
"relaxation_attempts": peak_relax_section.get(_const.CONF_RELAXATION_ATTEMPTS_PEAK, 4),
|
"enable_min_periods": self.config_entry.options.get(_const.CONF_ENABLE_MIN_PERIODS_PEAK, False),
|
||||||
|
"min_periods": self.config_entry.options.get(_const.CONF_MIN_PERIODS_PEAK, 2),
|
||||||
|
"relaxation_attempts": self.config_entry.options.get(_const.CONF_RELAXATION_ATTEMPTS_PEAK, 4),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -184,9 +135,8 @@ class TibberPricesDataTransformer:
|
||||||
|
|
||||||
# Configuration changed - must retransform
|
# Configuration changed - must retransform
|
||||||
current_config = self._get_current_transformation_config()
|
current_config = self._get_current_transformation_config()
|
||||||
config_changed = current_config != self._last_transformation_config
|
if current_config != self._last_transformation_config:
|
||||||
|
self._log("debug", "Configuration changed, retransforming data")
|
||||||
if config_changed:
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
# Check for midnight turnover
|
# Check for midnight turnover
|
||||||
|
|
@ -211,29 +161,18 @@ class TibberPricesDataTransformer:
|
||||||
source_data_timestamp = raw_data.get("timestamp")
|
source_data_timestamp = raw_data.get("timestamp")
|
||||||
|
|
||||||
# Return cached transformed data if no retransformation needed
|
# Return cached transformed data if no retransformation needed
|
||||||
should_retransform = self._should_retransform_data(current_time, source_data_timestamp)
|
if (
|
||||||
has_cache = self._cached_transformed_data is not None
|
not self._should_retransform_data(current_time, source_data_timestamp)
|
||||||
|
and self._cached_transformed_data is not None
|
||||||
self._log(
|
):
|
||||||
"info",
|
|
||||||
"transform_data: should_retransform=%s, has_cache=%s",
|
|
||||||
should_retransform,
|
|
||||||
has_cache,
|
|
||||||
)
|
|
||||||
|
|
||||||
if not should_retransform and has_cache:
|
|
||||||
self._log("debug", "Using cached transformed data (no transformation needed)")
|
self._log("debug", "Using cached transformed data (no transformation needed)")
|
||||||
# has_cache ensures _cached_transformed_data is not None
|
return self._cached_transformed_data
|
||||||
return self._cached_transformed_data # type: ignore[return-value]
|
|
||||||
|
|
||||||
self._log("debug", "Transforming price data (enrichment + period calculation)")
|
self._log("debug", "Transforming price data (enrichment + period calculation)")
|
||||||
|
|
||||||
# Extract data from single-home structure
|
# Extract data from single-home structure
|
||||||
home_id = raw_data.get("home_id", "")
|
home_id = raw_data.get("home_id", "")
|
||||||
# CRITICAL: Make a deep copy of intervals to avoid modifying cached raw data
|
all_intervals = raw_data.get("price_info", [])
|
||||||
# The enrichment function modifies intervals in-place, which would corrupt
|
|
||||||
# the original API data and make re-enrichment with different settings impossible
|
|
||||||
all_intervals = copy.deepcopy(raw_data.get("price_info", []))
|
|
||||||
currency = raw_data.get("currency", "EUR")
|
currency = raw_data.get("currency", "EUR")
|
||||||
|
|
||||||
if not all_intervals:
|
if not all_intervals:
|
||||||
|
|
@ -250,16 +189,11 @@ class TibberPricesDataTransformer:
|
||||||
|
|
||||||
# Enrich price info dynamically with calculated differences and rating levels
|
# Enrich price info dynamically with calculated differences and rating levels
|
||||||
# (Modifies all_intervals in-place, returns same list)
|
# (Modifies all_intervals in-place, returns same list)
|
||||||
thresholds = self.get_threshold_percentages() # Only for rating_level
|
thresholds = self.get_threshold_percentages()
|
||||||
level_gap_tolerance = self.get_level_gap_tolerance() # Separate: for Tibber's price level
|
|
||||||
|
|
||||||
enriched_intervals = enrich_price_info_with_differences(
|
enriched_intervals = enrich_price_info_with_differences(
|
||||||
all_intervals,
|
all_intervals,
|
||||||
threshold_low=thresholds["low"],
|
threshold_low=thresholds["low"],
|
||||||
threshold_high=thresholds["high"],
|
threshold_high=thresholds["high"],
|
||||||
hysteresis=float(thresholds["hysteresis"]),
|
|
||||||
gap_tolerance=int(thresholds["gap_tolerance"]),
|
|
||||||
level_gap_tolerance=level_gap_tolerance,
|
|
||||||
time=self.time,
|
time=self.time,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -16,10 +16,8 @@ from .period_building import (
|
||||||
add_interval_ends,
|
add_interval_ends,
|
||||||
build_periods,
|
build_periods,
|
||||||
calculate_reference_prices,
|
calculate_reference_prices,
|
||||||
extend_periods_across_midnight,
|
|
||||||
filter_periods_by_end_date,
|
filter_periods_by_end_date,
|
||||||
filter_periods_by_min_length,
|
filter_periods_by_min_length,
|
||||||
filter_superseded_periods,
|
|
||||||
split_intervals_by_day,
|
split_intervals_by_day,
|
||||||
)
|
)
|
||||||
from .period_statistics import (
|
from .period_statistics import (
|
||||||
|
|
@ -54,10 +52,10 @@ def calculate_periods(
|
||||||
7. Extract period summaries (start/end times, not full price data)
|
7. Extract period summaries (start/end times, not full price data)
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
all_prices: All price data points from yesterday/today/tomorrow.
|
all_prices: All price data points from yesterday/today/tomorrow
|
||||||
config: Period configuration containing reverse_sort, flex, min_distance_from_avg,
|
config: Period configuration containing reverse_sort, flex, min_distance_from_avg,
|
||||||
min_period_length, threshold_low, and threshold_high.
|
min_period_length, threshold_low, and threshold_high
|
||||||
time: TibberPricesTimeService instance (required).
|
time: TibberPricesTimeService instance (required)
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Dict with:
|
Dict with:
|
||||||
|
|
@ -185,14 +183,12 @@ def calculate_periods(
|
||||||
# Step 5: Add interval ends
|
# Step 5: Add interval ends
|
||||||
add_interval_ends(raw_periods, time=time)
|
add_interval_ends(raw_periods, time=time)
|
||||||
|
|
||||||
# Step 6: Filter periods by end date (keep periods ending yesterday or later)
|
# Step 6: Filter periods by end date (keep periods ending today or later)
|
||||||
# This ensures coordinator cache contains yesterday/today/tomorrow periods
|
|
||||||
# Sensors filter further for today+tomorrow, services can access all cached periods
|
|
||||||
raw_periods = filter_periods_by_end_date(raw_periods, time=time)
|
raw_periods = filter_periods_by_end_date(raw_periods, time=time)
|
||||||
|
|
||||||
# Step 7: Extract lightweight period summaries (no full price data)
|
# Step 8: Extract lightweight period summaries (no full price data)
|
||||||
# Note: Periods are filtered by end date to keep yesterday/today/tomorrow.
|
# Note: Filtering for current/future is done here based on end date,
|
||||||
# This preserves periods that started day-before-yesterday but end yesterday.
|
# not start date. This preserves periods that started yesterday but end today.
|
||||||
thresholds = TibberPricesThresholdConfig(
|
thresholds = TibberPricesThresholdConfig(
|
||||||
threshold_low=threshold_low,
|
threshold_low=threshold_low,
|
||||||
threshold_high=threshold_high,
|
threshold_high=threshold_high,
|
||||||
|
|
@ -209,26 +205,6 @@ def calculate_periods(
|
||||||
time=time,
|
time=time,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Step 8: Cross-day extension for late-night periods
|
|
||||||
# If a best-price period ends near midnight and tomorrow has continued low prices,
|
|
||||||
# extend the period across midnight to give users the full cheap window
|
|
||||||
period_summaries = extend_periods_across_midnight(
|
|
||||||
period_summaries,
|
|
||||||
all_prices_sorted,
|
|
||||||
price_context,
|
|
||||||
time=time,
|
|
||||||
reverse_sort=reverse_sort,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Step 9: Filter superseded periods
|
|
||||||
# When tomorrow data is available, late-night today periods that were found via
|
|
||||||
# relaxation may be obsolete if tomorrow has significantly better alternatives
|
|
||||||
period_summaries = filter_superseded_periods(
|
|
||||||
period_summaries,
|
|
||||||
time=time,
|
|
||||||
reverse_sort=reverse_sort,
|
|
||||||
)
|
|
||||||
|
|
||||||
return {
|
return {
|
||||||
"periods": period_summaries, # Lightweight summaries only
|
"periods": period_summaries, # Lightweight summaries only
|
||||||
"metadata": {
|
"metadata": {
|
||||||
|
|
|
||||||
|
|
@ -155,12 +155,9 @@ def check_interval_criteria(
|
||||||
in_flex = price >= flex_threshold
|
in_flex = price >= flex_threshold
|
||||||
else:
|
else:
|
||||||
# Best price: accept prices <= (ref_price + flex_amount)
|
# Best price: accept prices <= (ref_price + flex_amount)
|
||||||
# Accept ALL low prices up to the flex threshold, not just those >= minimum
|
# Prices must be CLOSE TO or AT the minimum
|
||||||
# This ensures that if there are multiple low-price intervals, all that meet
|
|
||||||
# the threshold are included, regardless of whether they're before or after
|
|
||||||
# the daily minimum in the chronological sequence.
|
|
||||||
flex_threshold = criteria.ref_price + flex_amount
|
flex_threshold = criteria.ref_price + flex_amount
|
||||||
in_flex = price <= flex_threshold
|
in_flex = price >= criteria.ref_price and price <= flex_threshold
|
||||||
|
|
||||||
# ============================================================
|
# ============================================================
|
||||||
# MIN_DISTANCE FILTER: Check if price is far enough from average
|
# MIN_DISTANCE FILTER: Check if price is far enough from average
|
||||||
|
|
@ -184,7 +181,7 @@ def check_interval_criteria(
|
||||||
if scale_factor < SCALE_FACTOR_WARNING_THRESHOLD:
|
if scale_factor < SCALE_FACTOR_WARNING_THRESHOLD:
|
||||||
import logging # noqa: PLC0415
|
import logging # noqa: PLC0415
|
||||||
|
|
||||||
_LOGGER = logging.getLogger(f"{__name__}.details") # noqa: N806
|
_LOGGER = logging.getLogger(__name__) # noqa: N806
|
||||||
_LOGGER.debug(
|
_LOGGER.debug(
|
||||||
"High flex %.1f%% detected: Reducing min_distance %.1f%% → %.1f%% (scale %.2f)",
|
"High flex %.1f%% detected: Reducing min_distance %.1f%% → %.1f%% (scale %.2f)",
|
||||||
flex_abs * 100,
|
flex_abs * 100,
|
||||||
|
|
|
||||||
|
|
@ -15,34 +15,19 @@ Uses statistical methods:
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
from datetime import datetime
|
|
||||||
from typing import NamedTuple
|
from typing import NamedTuple
|
||||||
|
|
||||||
from custom_components.tibber_prices.utils.price import calculate_coefficient_of_variation
|
|
||||||
|
|
||||||
_LOGGER = logging.getLogger(__name__)
|
_LOGGER = logging.getLogger(__name__)
|
||||||
_LOGGER_DETAILS = logging.getLogger(__name__ + ".details")
|
_LOGGER_DETAILS = logging.getLogger(__name__ + ".details")
|
||||||
|
|
||||||
# Outlier filtering constants
|
# Outlier filtering constants
|
||||||
MIN_CONTEXT_SIZE = 3 # Minimum intervals needed before/after for analysis
|
MIN_CONTEXT_SIZE = 3 # Minimum intervals needed before/after for analysis
|
||||||
|
CONFIDENCE_LEVEL = 2.0 # Standard deviations for 95% confidence interval
|
||||||
VOLATILITY_THRESHOLD = 0.05 # 5% max relative std dev for zigzag detection
|
VOLATILITY_THRESHOLD = 0.05 # 5% max relative std dev for zigzag detection
|
||||||
SYMMETRY_THRESHOLD = 1.5 # Max std dev difference for symmetric spike
|
SYMMETRY_THRESHOLD = 1.5 # Max std dev difference for symmetric spike
|
||||||
RELATIVE_VOLATILITY_THRESHOLD = 2.0 # Window volatility vs context (cluster detection)
|
RELATIVE_VOLATILITY_THRESHOLD = 2.0 # Window volatility vs context (cluster detection)
|
||||||
ASYMMETRY_TAIL_WINDOW = 6 # Skip asymmetry check for last ~1.5h (6 intervals) of available data
|
ASYMMETRY_TAIL_WINDOW = 6 # Skip asymmetry check for last ~1.5h (6 intervals) of available data
|
||||||
ZIGZAG_TAIL_WINDOW = 6 # Skip zigzag/cluster detection for last ~1.5h (6 intervals)
|
ZIGZAG_TAIL_WINDOW = 6 # Skip zigzag/cluster detection for last ~1.5h (6 intervals)
|
||||||
EXTREMES_PROTECTION_TOLERANCE = 0.001 # Protect prices within 0.1% of daily min/max from smoothing
|
|
||||||
|
|
||||||
# Adaptive confidence level constants
|
|
||||||
# Uses coefficient of variation (CV) from utils/price.py for consistency with volatility sensors
|
|
||||||
# On flat days (low CV), we're more conservative (higher confidence = fewer smoothed)
|
|
||||||
# On volatile days (high CV), we're more aggressive (lower confidence = more smoothed)
|
|
||||||
CONFIDENCE_LEVEL_MIN = 1.5 # Minimum confidence (volatile days: smooth more aggressively)
|
|
||||||
CONFIDENCE_LEVEL_MAX = 2.5 # Maximum confidence (flat days: smooth more conservatively)
|
|
||||||
CONFIDENCE_LEVEL_DEFAULT = 2.0 # Default: 95% confidence interval (2 std devs)
|
|
||||||
# CV thresholds for adaptive confidence (align with volatility sensor defaults)
|
|
||||||
# These are in percentage points (e.g., 10.0 = 10% CV)
|
|
||||||
DAILY_CV_LOW = 10.0 # ≤10% CV = flat day (use max confidence)
|
|
||||||
DAILY_CV_HIGH = 30.0 # ≥30% CV = volatile day (use min confidence)
|
|
||||||
|
|
||||||
# Module-local log indentation (each module starts at level 0)
|
# Module-local log indentation (each module starts at level 0)
|
||||||
INDENT_L0 = "" # All logs in this module (no indentation needed)
|
INDENT_L0 = "" # All logs in this module (no indentation needed)
|
||||||
|
|
@ -248,166 +233,6 @@ def _validate_spike_candidate(
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
def _calculate_daily_extremes(intervals: list[dict]) -> dict[str, tuple[float, float]]:
|
|
||||||
"""
|
|
||||||
Calculate daily min/max prices for each day in the interval list.
|
|
||||||
|
|
||||||
These extremes are used to protect reference prices from being smoothed.
|
|
||||||
The daily minimum is the reference for best_price periods, and the daily
|
|
||||||
maximum is the reference for peak_price periods - smoothing these would
|
|
||||||
break period detection.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
intervals: List of price intervals with 'startsAt' and 'total' keys
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Dict mapping date strings to (min_price, max_price) tuples
|
|
||||||
|
|
||||||
"""
|
|
||||||
daily_prices: dict[str, list[float]] = {}
|
|
||||||
|
|
||||||
for interval in intervals:
|
|
||||||
starts_at = interval.get("startsAt")
|
|
||||||
if starts_at is None:
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Handle both datetime objects and ISO strings
|
|
||||||
dt = datetime.fromisoformat(starts_at) if isinstance(starts_at, str) else starts_at
|
|
||||||
|
|
||||||
date_key = dt.strftime("%Y-%m-%d")
|
|
||||||
price = float(interval["total"])
|
|
||||||
daily_prices.setdefault(date_key, []).append(price)
|
|
||||||
|
|
||||||
# Calculate min/max for each day
|
|
||||||
return {date_key: (min(prices), max(prices)) for date_key, prices in daily_prices.items()}
|
|
||||||
|
|
||||||
|
|
||||||
def _calculate_daily_cv(intervals: list[dict]) -> dict[str, float]:
|
|
||||||
"""
|
|
||||||
Calculate daily coefficient of variation (CV) for each day.
|
|
||||||
|
|
||||||
Uses the same CV calculation as volatility sensors for consistency.
|
|
||||||
CV = (std_dev / mean) * 100, expressed as percentage.
|
|
||||||
|
|
||||||
Used to adapt the confidence level for outlier detection:
|
|
||||||
- Flat days (low CV): Higher confidence → fewer false positives
|
|
||||||
- Volatile days (high CV): Lower confidence → catch more real outliers
|
|
||||||
|
|
||||||
Args:
|
|
||||||
intervals: List of price intervals with 'startsAt' and 'total' keys
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Dict mapping date strings to CV percentage (e.g., 15.0 for 15% CV)
|
|
||||||
|
|
||||||
"""
|
|
||||||
daily_prices: dict[str, list[float]] = {}
|
|
||||||
|
|
||||||
for interval in intervals:
|
|
||||||
starts_at = interval.get("startsAt")
|
|
||||||
if starts_at is None:
|
|
||||||
continue
|
|
||||||
|
|
||||||
dt = datetime.fromisoformat(starts_at) if isinstance(starts_at, str) else starts_at
|
|
||||||
date_key = dt.strftime("%Y-%m-%d")
|
|
||||||
price = float(interval["total"])
|
|
||||||
daily_prices.setdefault(date_key, []).append(price)
|
|
||||||
|
|
||||||
# Calculate CV using the shared function from utils/price.py
|
|
||||||
result = {}
|
|
||||||
for date_key, prices in daily_prices.items():
|
|
||||||
cv = calculate_coefficient_of_variation(prices)
|
|
||||||
result[date_key] = cv if cv is not None else 0.0
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def _get_adaptive_confidence_level(
|
|
||||||
interval: dict,
|
|
||||||
daily_cv: dict[str, float],
|
|
||||||
) -> float:
|
|
||||||
"""
|
|
||||||
Get adaptive confidence level based on daily coefficient of variation (CV).
|
|
||||||
|
|
||||||
Maps daily CV to confidence level:
|
|
||||||
- Low CV (≤10%): High confidence (2.5) → conservative, fewer smoothed
|
|
||||||
- High CV (≥30%): Low confidence (1.5) → aggressive, more smoothed
|
|
||||||
- Between: Linear interpolation
|
|
||||||
|
|
||||||
Uses the same CV calculation as volatility sensors for consistency.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
interval: Price interval dict with 'startsAt' key
|
|
||||||
daily_cv: Dict from _calculate_daily_cv()
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Confidence level multiplier for std_dev threshold
|
|
||||||
|
|
||||||
"""
|
|
||||||
starts_at = interval.get("startsAt")
|
|
||||||
if starts_at is None:
|
|
||||||
return CONFIDENCE_LEVEL_DEFAULT
|
|
||||||
|
|
||||||
dt = datetime.fromisoformat(starts_at) if isinstance(starts_at, str) else starts_at
|
|
||||||
date_key = dt.strftime("%Y-%m-%d")
|
|
||||||
|
|
||||||
cv = daily_cv.get(date_key, 0.0)
|
|
||||||
|
|
||||||
# Linear interpolation between LOW and HIGH CV
|
|
||||||
# Low CV → high confidence (conservative)
|
|
||||||
# High CV → low confidence (aggressive)
|
|
||||||
if cv <= DAILY_CV_LOW:
|
|
||||||
return CONFIDENCE_LEVEL_MAX
|
|
||||||
if cv >= DAILY_CV_HIGH:
|
|
||||||
return CONFIDENCE_LEVEL_MIN
|
|
||||||
|
|
||||||
# Linear interpolation: as CV increases, confidence decreases
|
|
||||||
ratio = (cv - DAILY_CV_LOW) / (DAILY_CV_HIGH - DAILY_CV_LOW)
|
|
||||||
return CONFIDENCE_LEVEL_MAX - (ratio * (CONFIDENCE_LEVEL_MAX - CONFIDENCE_LEVEL_MIN))
|
|
||||||
|
|
||||||
|
|
||||||
def _is_daily_extreme(
|
|
||||||
interval: dict,
|
|
||||||
daily_extremes: dict[str, tuple[float, float]],
|
|
||||||
tolerance: float = EXTREMES_PROTECTION_TOLERANCE,
|
|
||||||
) -> bool:
|
|
||||||
"""
|
|
||||||
Check if an interval's price is at or very near a daily extreme.
|
|
||||||
|
|
||||||
Prices at daily extremes should never be smoothed because:
|
|
||||||
- Daily minimum is the reference for best_price period detection
|
|
||||||
- Daily maximum is the reference for peak_price period detection
|
|
||||||
- Smoothing these would cause periods to miss their most important intervals
|
|
||||||
|
|
||||||
Args:
|
|
||||||
interval: Price interval dict with 'startsAt' and 'total' keys
|
|
||||||
daily_extremes: Dict from _calculate_daily_extremes()
|
|
||||||
tolerance: Relative tolerance for matching (default 0.1%)
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
True if the price is at or very near a daily min or max
|
|
||||||
|
|
||||||
"""
|
|
||||||
starts_at = interval.get("startsAt")
|
|
||||||
if starts_at is None:
|
|
||||||
return False
|
|
||||||
|
|
||||||
# Handle both datetime objects and ISO strings
|
|
||||||
dt = datetime.fromisoformat(starts_at) if isinstance(starts_at, str) else starts_at
|
|
||||||
|
|
||||||
date_key = dt.strftime("%Y-%m-%d")
|
|
||||||
if date_key not in daily_extremes:
|
|
||||||
return False
|
|
||||||
|
|
||||||
price = float(interval["total"])
|
|
||||||
daily_min, daily_max = daily_extremes[date_key]
|
|
||||||
|
|
||||||
# Check if price is within tolerance of daily min or max
|
|
||||||
# Using relative tolerance: |price - extreme| <= extreme * tolerance
|
|
||||||
min_threshold = daily_min * (1 + tolerance)
|
|
||||||
max_threshold = daily_max * (1 - tolerance)
|
|
||||||
|
|
||||||
return price <= min_threshold or price >= max_threshold
|
|
||||||
|
|
||||||
|
|
||||||
def filter_price_outliers(
|
def filter_price_outliers(
|
||||||
intervals: list[dict],
|
intervals: list[dict],
|
||||||
flexibility_pct: float,
|
flexibility_pct: float,
|
||||||
|
|
@ -435,29 +260,15 @@ def filter_price_outliers(
|
||||||
Intervals with smoothed prices (marked with _smoothed flag)
|
Intervals with smoothed prices (marked with _smoothed flag)
|
||||||
|
|
||||||
"""
|
"""
|
||||||
# Convert percentage to ratio once for all comparisons (e.g., 15.0 → 0.15)
|
|
||||||
flexibility_ratio = flexibility_pct / 100
|
|
||||||
|
|
||||||
# Calculate daily extremes to protect reference prices from smoothing
|
|
||||||
# Daily min is the reference for best_price, daily max for peak_price
|
|
||||||
daily_extremes = _calculate_daily_extremes(intervals)
|
|
||||||
|
|
||||||
# Calculate daily coefficient of variation (CV) for adaptive confidence levels
|
|
||||||
# Uses same CV calculation as volatility sensors for consistency
|
|
||||||
# Flat days → conservative smoothing, volatile days → aggressive smoothing
|
|
||||||
daily_cv = _calculate_daily_cv(intervals)
|
|
||||||
|
|
||||||
# Log CV info for debugging (CV is in percentage points, e.g., 15.0 = 15%)
|
|
||||||
cv_info = ", ".join(f"{date}: {cv:.1f}%" for date, cv in sorted(daily_cv.items()))
|
|
||||||
_LOGGER.info(
|
_LOGGER.info(
|
||||||
"%sSmoothing price outliers: %d intervals, flex=%.1f%%, daily CV: %s",
|
"%sSmoothing price outliers: %d intervals, flex=%.1f%%",
|
||||||
INDENT_L0,
|
INDENT_L0,
|
||||||
len(intervals),
|
len(intervals),
|
||||||
flexibility_pct,
|
flexibility_pct,
|
||||||
cv_info,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
protected_count = 0
|
# Convert percentage to ratio once for all comparisons (e.g., 15.0 → 0.15)
|
||||||
|
flexibility_ratio = flexibility_pct / 100
|
||||||
|
|
||||||
result = []
|
result = []
|
||||||
smoothed_count = 0
|
smoothed_count = 0
|
||||||
|
|
@ -465,20 +276,6 @@ def filter_price_outliers(
|
||||||
for i, current in enumerate(intervals):
|
for i, current in enumerate(intervals):
|
||||||
current_price = current["total"]
|
current_price = current["total"]
|
||||||
|
|
||||||
# CRITICAL: Never smooth daily extremes - they are the reference prices!
|
|
||||||
# Smoothing the daily min would break best_price period detection,
|
|
||||||
# smoothing the daily max would break peak_price period detection.
|
|
||||||
if _is_daily_extreme(current, daily_extremes):
|
|
||||||
result.append(current)
|
|
||||||
protected_count += 1
|
|
||||||
_LOGGER_DETAILS.debug(
|
|
||||||
"%sProtected daily extreme at %s: %.2f ct/kWh (not smoothed)",
|
|
||||||
INDENT_L0,
|
|
||||||
current.get("startsAt", f"index {i}"),
|
|
||||||
current_price * 100,
|
|
||||||
)
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Get context windows (3 intervals before and after)
|
# Get context windows (3 intervals before and after)
|
||||||
context_before = intervals[max(0, i - MIN_CONTEXT_SIZE) : i]
|
context_before = intervals[max(0, i - MIN_CONTEXT_SIZE) : i]
|
||||||
context_after = intervals[i + 1 : min(len(intervals), i + 1 + MIN_CONTEXT_SIZE)]
|
context_after = intervals[i + 1 : min(len(intervals), i + 1 + MIN_CONTEXT_SIZE)]
|
||||||
|
|
@ -500,11 +297,8 @@ def filter_price_outliers(
|
||||||
# Calculate how far current price deviates from expected
|
# Calculate how far current price deviates from expected
|
||||||
residual = abs(current_price - expected_price)
|
residual = abs(current_price - expected_price)
|
||||||
|
|
||||||
# Adaptive confidence level based on daily CV:
|
# Tolerance based on statistical confidence (2 std dev = 95% confidence)
|
||||||
# - Flat days (low CV): higher confidence (2.5) → fewer false positives
|
tolerance = stats["std_dev"] * CONFIDENCE_LEVEL
|
||||||
# - Volatile days (high CV): lower confidence (1.5) → catch more real spikes
|
|
||||||
confidence_level = _get_adaptive_confidence_level(current, daily_cv)
|
|
||||||
tolerance = stats["std_dev"] * confidence_level
|
|
||||||
|
|
||||||
# Not a spike if within tolerance
|
# Not a spike if within tolerance
|
||||||
if residual <= tolerance:
|
if residual <= tolerance:
|
||||||
|
|
@ -538,22 +332,23 @@ def filter_price_outliers(
|
||||||
smoothed_count += 1
|
smoothed_count += 1
|
||||||
|
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"%sSmoothed spike at %s: %.2f → %.2f ct/kWh (residual: %.2f, tolerance: %.2f, confidence: %.2f)",
|
"%sSmoothed spike at %s: %.2f → %.2f ct/kWh (residual: %.2f, tolerance: %.2f, trend_slope: %.4f)",
|
||||||
INDENT_L0,
|
INDENT_L0,
|
||||||
current.get("startsAt", f"index {i}"),
|
current.get("startsAt", f"index {i}"),
|
||||||
current_price * 100,
|
current_price * 100,
|
||||||
expected_price * 100,
|
expected_price * 100,
|
||||||
residual * 100,
|
residual * 100,
|
||||||
tolerance * 100,
|
tolerance * 100,
|
||||||
confidence_level,
|
stats["trend_slope"] * 100,
|
||||||
)
|
)
|
||||||
|
|
||||||
if smoothed_count > 0 or protected_count > 0:
|
if smoothed_count > 0:
|
||||||
_LOGGER.info(
|
_LOGGER.info(
|
||||||
"%sPrice outlier smoothing complete: %d smoothed, %d protected (daily extremes)",
|
"%sPrice outlier smoothing complete: %d/%d intervals smoothed (%.1f%%)",
|
||||||
INDENT_L0,
|
INDENT_L0,
|
||||||
smoothed_count,
|
smoothed_count,
|
||||||
protected_count,
|
len(intervals),
|
||||||
|
(smoothed_count / len(intervals)) * 100,
|
||||||
)
|
)
|
||||||
|
|
||||||
return result
|
return result
|
||||||
|
|
|
||||||
|
|
@ -3,12 +3,13 @@
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
from datetime import date, datetime, timedelta
|
|
||||||
from typing import TYPE_CHECKING, Any
|
from typing import TYPE_CHECKING, Any
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import PRICE_LEVEL_MAPPING
|
from custom_components.tibber_prices.const import PRICE_LEVEL_MAPPING
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
|
from datetime import date
|
||||||
|
|
||||||
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
||||||
|
|
||||||
from .level_filtering import (
|
from .level_filtering import (
|
||||||
|
|
@ -247,21 +248,19 @@ def add_interval_ends(periods: list[list[dict]], *, time: TibberPricesTimeServic
|
||||||
|
|
||||||
def filter_periods_by_end_date(periods: list[list[dict]], *, time: TibberPricesTimeService) -> list[list[dict]]:
|
def filter_periods_by_end_date(periods: list[list[dict]], *, time: TibberPricesTimeService) -> list[list[dict]]:
|
||||||
"""
|
"""
|
||||||
Filter periods to keep only relevant ones for yesterday, today, and tomorrow.
|
Filter periods to keep only relevant ones for today and tomorrow.
|
||||||
|
|
||||||
Keep periods that:
|
Keep periods that:
|
||||||
- End yesterday or later (>= start of yesterday)
|
- End in the future (> now)
|
||||||
|
- End today but after the start of the day (not exactly at midnight)
|
||||||
|
|
||||||
This removes:
|
This removes:
|
||||||
- Periods that ended before yesterday (day-before-yesterday or earlier)
|
- Periods that ended yesterday
|
||||||
|
- Periods that ended exactly at midnight today (they're completely in the past)
|
||||||
Rationale: Coordinator caches periods for yesterday/today/tomorrow so that:
|
|
||||||
- Binary sensors can filter for today+tomorrow (current/next periods)
|
|
||||||
- Services can access yesterday's periods when user requests "yesterday" data
|
|
||||||
"""
|
"""
|
||||||
now = time.now()
|
now = time.now()
|
||||||
# Calculate start of yesterday (midnight yesterday)
|
today = now.date()
|
||||||
yesterday_start = time.start_of_local_day(now) - time.get_interval_duration() * 96 # 96 intervals = 24 hours
|
midnight_today = time.start_of_local_day(now)
|
||||||
|
|
||||||
filtered = []
|
filtered = []
|
||||||
for period in periods:
|
for period in periods:
|
||||||
|
|
@ -275,433 +274,13 @@ def filter_periods_by_end_date(periods: list[list[dict]], *, time: TibberPricesT
|
||||||
if not period_end:
|
if not period_end:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Keep if period ends yesterday or later
|
# Keep if period ends in the future
|
||||||
if period_end >= yesterday_start:
|
if time.is_in_future(period_end):
|
||||||
|
filtered.append(period)
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Keep if period ends today but AFTER midnight (not exactly at midnight)
|
||||||
|
if period_end.date() == today and period_end > midnight_today:
|
||||||
filtered.append(period)
|
filtered.append(period)
|
||||||
|
|
||||||
return filtered
|
return filtered
|
||||||
|
|
||||||
|
|
||||||
def _categorize_periods_for_supersession(
|
|
||||||
period_summaries: list[dict],
|
|
||||||
today: date,
|
|
||||||
tomorrow: date,
|
|
||||||
late_hour_threshold: int,
|
|
||||||
early_hour_limit: int,
|
|
||||||
) -> tuple[list[dict], list[dict], list[dict]]:
|
|
||||||
"""Categorize periods into today-late, tomorrow-early, and other."""
|
|
||||||
today_late: list[dict] = []
|
|
||||||
tomorrow_early: list[dict] = []
|
|
||||||
other: list[dict] = []
|
|
||||||
|
|
||||||
for period in period_summaries:
|
|
||||||
period_start = period.get("start")
|
|
||||||
period_end = period.get("end")
|
|
||||||
|
|
||||||
if not period_start or not period_end:
|
|
||||||
other.append(period)
|
|
||||||
# Today late-night periods: START today at or after late_hour_threshold (e.g., 20:00)
|
|
||||||
# Note: period_end could be tomorrow (e.g., 23:30-00:00 spans midnight)
|
|
||||||
elif period_start.date() == today and period_start.hour >= late_hour_threshold:
|
|
||||||
today_late.append(period)
|
|
||||||
# Tomorrow early-morning periods: START tomorrow before early_hour_limit (e.g., 08:00)
|
|
||||||
elif period_start.date() == tomorrow and period_start.hour < early_hour_limit:
|
|
||||||
tomorrow_early.append(period)
|
|
||||||
else:
|
|
||||||
other.append(period)
|
|
||||||
|
|
||||||
return today_late, tomorrow_early, other
|
|
||||||
|
|
||||||
|
|
||||||
def _filter_superseded_today_periods(
|
|
||||||
today_late_periods: list[dict],
|
|
||||||
best_tomorrow: dict,
|
|
||||||
best_tomorrow_price: float,
|
|
||||||
improvement_threshold: float,
|
|
||||||
) -> list[dict]:
|
|
||||||
"""Filter today periods that are superseded by a better tomorrow period."""
|
|
||||||
kept: list[dict] = []
|
|
||||||
|
|
||||||
for today_period in today_late_periods:
|
|
||||||
today_price = today_period.get("price_mean")
|
|
||||||
|
|
||||||
if today_price is None:
|
|
||||||
kept.append(today_period)
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Calculate how much better tomorrow is (as percentage)
|
|
||||||
improvement_pct = ((today_price - best_tomorrow_price) / today_price * 100) if today_price > 0 else 0
|
|
||||||
|
|
||||||
_LOGGER.debug(
|
|
||||||
"Supersession check: Today %s-%s (%.4f) vs Tomorrow %s-%s (%.4f) = %.1f%% improvement (threshold: %.1f%%)",
|
|
||||||
today_period["start"].strftime("%H:%M"),
|
|
||||||
today_period["end"].strftime("%H:%M"),
|
|
||||||
today_price,
|
|
||||||
best_tomorrow["start"].strftime("%H:%M"),
|
|
||||||
best_tomorrow["end"].strftime("%H:%M"),
|
|
||||||
best_tomorrow_price,
|
|
||||||
improvement_pct,
|
|
||||||
improvement_threshold,
|
|
||||||
)
|
|
||||||
|
|
||||||
if improvement_pct >= improvement_threshold:
|
|
||||||
_LOGGER.info(
|
|
||||||
"Period superseded: Today %s-%s (%.2f) replaced by Tomorrow %s-%s (%.2f, %.1f%% better)",
|
|
||||||
today_period["start"].strftime("%H:%M"),
|
|
||||||
today_period["end"].strftime("%H:%M"),
|
|
||||||
today_price,
|
|
||||||
best_tomorrow["start"].strftime("%H:%M"),
|
|
||||||
best_tomorrow["end"].strftime("%H:%M"),
|
|
||||||
best_tomorrow_price,
|
|
||||||
improvement_pct,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
kept.append(today_period)
|
|
||||||
|
|
||||||
return kept
|
|
||||||
|
|
||||||
|
|
||||||
def filter_superseded_periods(
|
|
||||||
period_summaries: list[dict],
|
|
||||||
*,
|
|
||||||
time: TibberPricesTimeService,
|
|
||||||
reverse_sort: bool,
|
|
||||||
) -> list[dict]:
|
|
||||||
"""
|
|
||||||
Filter out late-night today periods that are superseded by better tomorrow periods.
|
|
||||||
|
|
||||||
When tomorrow's data becomes available, some late-night periods that were found
|
|
||||||
through relaxation may no longer make sense. If tomorrow has a significantly
|
|
||||||
better period in the early morning, the late-night today period is obsolete.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
- Today 23:30-00:00 at 0.70 kr (found via relaxation, was best available)
|
|
||||||
- Tomorrow 04:00-05:30 at 0.50 kr (much better alternative)
|
|
||||||
→ The today period is superseded and should be filtered out
|
|
||||||
|
|
||||||
This only applies to best-price periods (reverse_sort=False).
|
|
||||||
Peak-price periods are not filtered this way.
|
|
||||||
|
|
||||||
"""
|
|
||||||
from .types import ( # noqa: PLC0415
|
|
||||||
CROSS_DAY_LATE_PERIOD_START_HOUR,
|
|
||||||
CROSS_DAY_MAX_EXTENSION_HOUR,
|
|
||||||
SUPERSESSION_PRICE_IMPROVEMENT_PCT,
|
|
||||||
)
|
|
||||||
|
|
||||||
_LOGGER.debug(
|
|
||||||
"filter_superseded_periods called: %d periods, reverse_sort=%s",
|
|
||||||
len(period_summaries) if period_summaries else 0,
|
|
||||||
reverse_sort,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Only filter for best-price periods
|
|
||||||
if reverse_sort or not period_summaries:
|
|
||||||
return period_summaries
|
|
||||||
|
|
||||||
now = time.now()
|
|
||||||
today = now.date()
|
|
||||||
tomorrow = today + timedelta(days=1)
|
|
||||||
|
|
||||||
# Categorize periods
|
|
||||||
today_late, tomorrow_early, other = _categorize_periods_for_supersession(
|
|
||||||
period_summaries,
|
|
||||||
today,
|
|
||||||
tomorrow,
|
|
||||||
CROSS_DAY_LATE_PERIOD_START_HOUR,
|
|
||||||
CROSS_DAY_MAX_EXTENSION_HOUR,
|
|
||||||
)
|
|
||||||
|
|
||||||
_LOGGER.debug(
|
|
||||||
"Supersession categorization: today_late=%d, tomorrow_early=%d, other=%d",
|
|
||||||
len(today_late),
|
|
||||||
len(tomorrow_early),
|
|
||||||
len(other),
|
|
||||||
)
|
|
||||||
|
|
||||||
# If no tomorrow early periods, nothing to compare against
|
|
||||||
if not tomorrow_early:
|
|
||||||
_LOGGER.debug("No tomorrow early periods - skipping supersession check")
|
|
||||||
return period_summaries
|
|
||||||
|
|
||||||
# Find the best tomorrow early period (lowest mean price)
|
|
||||||
best_tomorrow = min(tomorrow_early, key=lambda p: p.get("price_mean", float("inf")))
|
|
||||||
best_tomorrow_price = best_tomorrow.get("price_mean")
|
|
||||||
|
|
||||||
if best_tomorrow_price is None:
|
|
||||||
return period_summaries
|
|
||||||
|
|
||||||
# Filter superseded today periods
|
|
||||||
kept_today = _filter_superseded_today_periods(
|
|
||||||
today_late,
|
|
||||||
best_tomorrow,
|
|
||||||
best_tomorrow_price,
|
|
||||||
SUPERSESSION_PRICE_IMPROVEMENT_PCT,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Reconstruct and sort by start time
|
|
||||||
result = other + kept_today + tomorrow_early
|
|
||||||
result.sort(key=lambda p: p.get("start") or time.now())
|
|
||||||
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def _is_period_eligible_for_extension(
|
|
||||||
period: dict,
|
|
||||||
today: date,
|
|
||||||
late_hour_threshold: int,
|
|
||||||
) -> bool:
|
|
||||||
"""
|
|
||||||
Check if a period is eligible for cross-day extension.
|
|
||||||
|
|
||||||
Eligibility criteria:
|
|
||||||
- Period has valid start and end times
|
|
||||||
- Period ends on today (not yesterday or tomorrow)
|
|
||||||
- Period ends late (after late_hour_threshold, e.g. 20:00)
|
|
||||||
|
|
||||||
"""
|
|
||||||
period_end = period.get("end")
|
|
||||||
period_start = period.get("start")
|
|
||||||
|
|
||||||
if not period_end or not period_start:
|
|
||||||
return False
|
|
||||||
|
|
||||||
if period_end.date() != today:
|
|
||||||
return False
|
|
||||||
|
|
||||||
return period_end.hour >= late_hour_threshold
|
|
||||||
|
|
||||||
|
|
||||||
def _find_extension_intervals(
|
|
||||||
period_end: datetime,
|
|
||||||
price_lookup: dict[str, dict],
|
|
||||||
criteria: Any,
|
|
||||||
max_extension_time: datetime,
|
|
||||||
interval_duration: timedelta,
|
|
||||||
) -> list[dict]:
|
|
||||||
"""
|
|
||||||
Find consecutive intervals after period_end that meet criteria.
|
|
||||||
|
|
||||||
Iterates forward from period_end, adding intervals while they
|
|
||||||
meet the flex and min_distance criteria. Stops at first failure
|
|
||||||
or when reaching max_extension_time.
|
|
||||||
|
|
||||||
"""
|
|
||||||
from .level_filtering import check_interval_criteria # noqa: PLC0415
|
|
||||||
|
|
||||||
extension_intervals: list[dict] = []
|
|
||||||
check_time = period_end
|
|
||||||
|
|
||||||
while check_time < max_extension_time:
|
|
||||||
price_data = price_lookup.get(check_time.isoformat())
|
|
||||||
if not price_data:
|
|
||||||
break # No more data
|
|
||||||
|
|
||||||
price = float(price_data["total"])
|
|
||||||
in_flex, meets_min_distance = check_interval_criteria(price, criteria)
|
|
||||||
|
|
||||||
if not (in_flex and meets_min_distance):
|
|
||||||
break # Criteria no longer met
|
|
||||||
|
|
||||||
extension_intervals.append(price_data)
|
|
||||||
check_time = check_time + interval_duration
|
|
||||||
|
|
||||||
return extension_intervals
|
|
||||||
|
|
||||||
|
|
||||||
def _collect_original_period_prices(
|
|
||||||
period_start: datetime,
|
|
||||||
period_end: datetime,
|
|
||||||
price_lookup: dict[str, dict],
|
|
||||||
interval_duration: timedelta,
|
|
||||||
) -> list[float]:
|
|
||||||
"""Collect prices from original period for CV calculation."""
|
|
||||||
prices: list[float] = []
|
|
||||||
current = period_start
|
|
||||||
while current < period_end:
|
|
||||||
price_data = price_lookup.get(current.isoformat())
|
|
||||||
if price_data:
|
|
||||||
prices.append(float(price_data["total"]))
|
|
||||||
current = current + interval_duration
|
|
||||||
return prices
|
|
||||||
|
|
||||||
|
|
||||||
def _build_extended_period(
|
|
||||||
period: dict,
|
|
||||||
extension_intervals: list[dict],
|
|
||||||
combined_prices: list[float],
|
|
||||||
combined_cv: float,
|
|
||||||
interval_duration: timedelta,
|
|
||||||
) -> dict:
|
|
||||||
"""Create extended period dict with updated statistics."""
|
|
||||||
period_start = period["start"]
|
|
||||||
period_end = period["end"]
|
|
||||||
new_end = period_end + (interval_duration * len(extension_intervals))
|
|
||||||
|
|
||||||
extended = period.copy()
|
|
||||||
extended["end"] = new_end
|
|
||||||
extended["duration_minutes"] = int((new_end - period_start).total_seconds() / 60)
|
|
||||||
extended["period_interval_count"] = len(combined_prices)
|
|
||||||
extended["cross_day_extended"] = True
|
|
||||||
extended["cross_day_extension_intervals"] = len(extension_intervals)
|
|
||||||
|
|
||||||
# Recalculate price statistics
|
|
||||||
extended["price_min"] = min(combined_prices)
|
|
||||||
extended["price_max"] = max(combined_prices)
|
|
||||||
extended["price_mean"] = sum(combined_prices) / len(combined_prices)
|
|
||||||
extended["price_spread"] = extended["price_max"] - extended["price_min"]
|
|
||||||
extended["price_coefficient_variation_%"] = round(combined_cv, 1)
|
|
||||||
|
|
||||||
return extended
|
|
||||||
|
|
||||||
|
|
||||||
def extend_periods_across_midnight(
|
|
||||||
period_summaries: list[dict],
|
|
||||||
all_prices: list[dict],
|
|
||||||
price_context: dict[str, Any],
|
|
||||||
*,
|
|
||||||
time: TibberPricesTimeService,
|
|
||||||
reverse_sort: bool,
|
|
||||||
) -> list[dict]:
|
|
||||||
"""
|
|
||||||
Extend late-night periods across midnight if favorable prices continue.
|
|
||||||
|
|
||||||
When a period ends close to midnight and tomorrow's data shows continued
|
|
||||||
favorable prices, extend the period into the next day. This prevents
|
|
||||||
artificial period breaks at midnight when it's actually better to continue.
|
|
||||||
|
|
||||||
Example: Best price period 22:00-23:45 today could extend to 04:00 tomorrow
|
|
||||||
if prices remain low overnight.
|
|
||||||
|
|
||||||
Rules:
|
|
||||||
- Only extends periods ending after CROSS_DAY_LATE_PERIOD_START_HOUR (20:00)
|
|
||||||
- Won't extend beyond CROSS_DAY_MAX_EXTENSION_HOUR (08:00) next day
|
|
||||||
- Extension must pass same flex criteria as original period
|
|
||||||
- Quality Gate (CV check) applies to extended period
|
|
||||||
|
|
||||||
Args:
|
|
||||||
period_summaries: List of period summary dicts (already processed)
|
|
||||||
all_prices: All price intervals including tomorrow
|
|
||||||
price_context: Dict with ref_prices, avg_prices, flex, min_distance_from_avg
|
|
||||||
time: Time service instance
|
|
||||||
reverse_sort: True for peak price, False for best price
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Updated list of period summaries with extensions applied
|
|
||||||
|
|
||||||
"""
|
|
||||||
from custom_components.tibber_prices.utils.price import calculate_coefficient_of_variation # noqa: PLC0415
|
|
||||||
|
|
||||||
from .types import ( # noqa: PLC0415
|
|
||||||
CROSS_DAY_LATE_PERIOD_START_HOUR,
|
|
||||||
CROSS_DAY_MAX_EXTENSION_HOUR,
|
|
||||||
PERIOD_MAX_CV,
|
|
||||||
TibberPricesIntervalCriteria,
|
|
||||||
)
|
|
||||||
|
|
||||||
if not period_summaries or not all_prices:
|
|
||||||
return period_summaries
|
|
||||||
|
|
||||||
# Build price lookup by timestamp
|
|
||||||
price_lookup: dict[str, dict] = {}
|
|
||||||
for price_data in all_prices:
|
|
||||||
interval_time = time.get_interval_time(price_data)
|
|
||||||
if interval_time:
|
|
||||||
price_lookup[interval_time.isoformat()] = price_data
|
|
||||||
|
|
||||||
ref_prices = price_context.get("ref_prices", {})
|
|
||||||
avg_prices = price_context.get("avg_prices", {})
|
|
||||||
flex = price_context.get("flex", 0.15)
|
|
||||||
min_distance = price_context.get("min_distance_from_avg", 0)
|
|
||||||
|
|
||||||
now = time.now()
|
|
||||||
today = now.date()
|
|
||||||
tomorrow = today + timedelta(days=1)
|
|
||||||
interval_duration = time.get_interval_duration()
|
|
||||||
|
|
||||||
# Max extension time (e.g., 08:00 tomorrow)
|
|
||||||
max_extension_time = time.start_of_local_day(now) + timedelta(days=1, hours=CROSS_DAY_MAX_EXTENSION_HOUR)
|
|
||||||
|
|
||||||
extended_summaries = []
|
|
||||||
|
|
||||||
for period in period_summaries:
|
|
||||||
# Check eligibility for extension
|
|
||||||
if not _is_period_eligible_for_extension(period, today, CROSS_DAY_LATE_PERIOD_START_HOUR):
|
|
||||||
extended_summaries.append(period)
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Get tomorrow's reference prices
|
|
||||||
tomorrow_ref = ref_prices.get(tomorrow) or ref_prices.get(str(tomorrow))
|
|
||||||
tomorrow_avg = avg_prices.get(tomorrow) or avg_prices.get(str(tomorrow))
|
|
||||||
|
|
||||||
if tomorrow_ref is None or tomorrow_avg is None:
|
|
||||||
extended_summaries.append(period)
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Set up criteria for extension check
|
|
||||||
criteria = TibberPricesIntervalCriteria(
|
|
||||||
ref_price=tomorrow_ref,
|
|
||||||
avg_price=tomorrow_avg,
|
|
||||||
flex=flex,
|
|
||||||
min_distance_from_avg=min_distance,
|
|
||||||
reverse_sort=reverse_sort,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Find extension intervals
|
|
||||||
extension_intervals = _find_extension_intervals(
|
|
||||||
period["end"],
|
|
||||||
price_lookup,
|
|
||||||
criteria,
|
|
||||||
max_extension_time,
|
|
||||||
interval_duration,
|
|
||||||
)
|
|
||||||
|
|
||||||
if not extension_intervals:
|
|
||||||
extended_summaries.append(period)
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Collect all prices for CV check
|
|
||||||
original_prices = _collect_original_period_prices(
|
|
||||||
period["start"],
|
|
||||||
period["end"],
|
|
||||||
price_lookup,
|
|
||||||
interval_duration,
|
|
||||||
)
|
|
||||||
extension_prices = [float(p["total"]) for p in extension_intervals]
|
|
||||||
combined_prices = original_prices + extension_prices
|
|
||||||
|
|
||||||
# Quality Gate: Check CV of extended period
|
|
||||||
combined_cv = calculate_coefficient_of_variation(combined_prices)
|
|
||||||
|
|
||||||
if combined_cv is not None and combined_cv <= PERIOD_MAX_CV:
|
|
||||||
# Extension passes quality gate
|
|
||||||
extended_period = _build_extended_period(
|
|
||||||
period,
|
|
||||||
extension_intervals,
|
|
||||||
combined_prices,
|
|
||||||
combined_cv,
|
|
||||||
interval_duration,
|
|
||||||
)
|
|
||||||
|
|
||||||
_LOGGER.info(
|
|
||||||
"Cross-day extension: Period %s-%s extended to %s (+%d intervals, CV=%.1f%%)",
|
|
||||||
period["start"].strftime("%H:%M"),
|
|
||||||
period["end"].strftime("%H:%M"),
|
|
||||||
extended_period["end"].strftime("%H:%M"),
|
|
||||||
len(extension_intervals),
|
|
||||||
combined_cv,
|
|
||||||
)
|
|
||||||
extended_summaries.append(extended_period)
|
|
||||||
else:
|
|
||||||
# Extension would exceed quality gate
|
|
||||||
_LOGGER_DETAILS.debug(
|
|
||||||
"%sCross-day extension rejected for period %s-%s: CV=%.1f%% > %.1f%%",
|
|
||||||
INDENT_L0,
|
|
||||||
period["start"].strftime("%H:%M"),
|
|
||||||
period["end"].strftime("%H:%M"),
|
|
||||||
combined_cv or 0,
|
|
||||||
PERIOD_MAX_CV,
|
|
||||||
)
|
|
||||||
extended_summaries.append(period)
|
|
||||||
|
|
||||||
return extended_summaries
|
|
||||||
|
|
|
||||||
|
|
@ -17,41 +17,6 @@ INDENT_L1 = " " # Nested logic / loop iterations
|
||||||
INDENT_L2 = " " # Deeper nesting
|
INDENT_L2 = " " # Deeper nesting
|
||||||
|
|
||||||
|
|
||||||
def _estimate_merged_cv(period1: dict, period2: dict) -> float | None:
|
|
||||||
"""
|
|
||||||
Estimate the CV of a merged period from two period summaries.
|
|
||||||
|
|
||||||
Since we don't have the raw prices, we estimate using the combined min/max range.
|
|
||||||
This is a conservative estimate - the actual CV could be higher or lower.
|
|
||||||
|
|
||||||
Formula: CV ≈ (range / 2) / mean * 100
|
|
||||||
Where range = max - min, mean = (min + max) / 2
|
|
||||||
|
|
||||||
This approximation assumes roughly uniform distribution within the range.
|
|
||||||
"""
|
|
||||||
p1_min = period1.get("price_min")
|
|
||||||
p1_max = period1.get("price_max")
|
|
||||||
p2_min = period2.get("price_min")
|
|
||||||
p2_max = period2.get("price_max")
|
|
||||||
|
|
||||||
if None in (p1_min, p1_max, p2_min, p2_max):
|
|
||||||
return None
|
|
||||||
|
|
||||||
# Cast to float - None case handled above
|
|
||||||
combined_min = min(float(p1_min), float(p2_min)) # type: ignore[arg-type]
|
|
||||||
combined_max = max(float(p1_max), float(p2_max)) # type: ignore[arg-type]
|
|
||||||
|
|
||||||
if combined_min <= 0:
|
|
||||||
return None
|
|
||||||
|
|
||||||
combined_mean = (combined_min + combined_max) / 2
|
|
||||||
price_range = combined_max - combined_min
|
|
||||||
|
|
||||||
# CV estimate based on range (assuming uniform distribution)
|
|
||||||
# For uniform distribution: std_dev ≈ range / sqrt(12) ≈ range / 3.46
|
|
||||||
return (price_range / 3.46) / combined_mean * 100
|
|
||||||
|
|
||||||
|
|
||||||
def recalculate_period_metadata(periods: list[dict], *, time: TibberPricesTimeService) -> None:
|
def recalculate_period_metadata(periods: list[dict], *, time: TibberPricesTimeService) -> None:
|
||||||
"""
|
"""
|
||||||
Recalculate period metadata after merging periods.
|
Recalculate period metadata after merging periods.
|
||||||
|
|
@ -140,7 +105,7 @@ def merge_adjacent_periods(period1: dict, period2: dict) -> dict:
|
||||||
"period2_end": period2["end"].isoformat(),
|
"period2_end": period2["end"].isoformat(),
|
||||||
}
|
}
|
||||||
|
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER.debug(
|
||||||
"%sMerged periods: %s-%s + %s-%s → %s-%s (duration: %d min)",
|
"%sMerged periods: %s-%s + %s-%s → %s-%s (duration: %d min)",
|
||||||
INDENT_L2,
|
INDENT_L2,
|
||||||
period1["start"].strftime("%H:%M"),
|
period1["start"].strftime("%H:%M"),
|
||||||
|
|
@ -155,119 +120,6 @@ def merge_adjacent_periods(period1: dict, period2: dict) -> dict:
|
||||||
return merged
|
return merged
|
||||||
|
|
||||||
|
|
||||||
def _check_merge_quality_gate(periods_to_merge: list[tuple[int, dict]], relaxed: dict) -> bool:
|
|
||||||
"""
|
|
||||||
Check if merging would create a period that's too heterogeneous.
|
|
||||||
|
|
||||||
Returns True if merge is allowed, False if blocked by Quality Gate.
|
|
||||||
"""
|
|
||||||
from .types import PERIOD_MAX_CV # noqa: PLC0415
|
|
||||||
|
|
||||||
relaxed_start = relaxed["start"]
|
|
||||||
relaxed_end = relaxed["end"]
|
|
||||||
|
|
||||||
for _idx, existing in periods_to_merge:
|
|
||||||
estimated_cv = _estimate_merged_cv(existing, relaxed)
|
|
||||||
if estimated_cv is not None and estimated_cv > PERIOD_MAX_CV:
|
|
||||||
_LOGGER.debug(
|
|
||||||
"Merge blocked by Quality Gate: %s-%s + %s-%s would have CV≈%.1f%% (max: %.1f%%)",
|
|
||||||
existing["start"].strftime("%H:%M"),
|
|
||||||
existing["end"].strftime("%H:%M"),
|
|
||||||
relaxed_start.strftime("%H:%M"),
|
|
||||||
relaxed_end.strftime("%H:%M"),
|
|
||||||
estimated_cv,
|
|
||||||
PERIOD_MAX_CV,
|
|
||||||
)
|
|
||||||
return False
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
def _would_swallow_existing(relaxed: dict, existing_periods: list[dict]) -> bool:
|
|
||||||
"""
|
|
||||||
Check if the relaxed period would "swallow" any existing period.
|
|
||||||
|
|
||||||
A period is "swallowed" if the new relaxed period completely contains it.
|
|
||||||
In this case, we should NOT merge - the existing smaller period is more
|
|
||||||
homogeneous and should be preserved.
|
|
||||||
|
|
||||||
This prevents relaxation from replacing good small periods with larger,
|
|
||||||
more heterogeneous ones.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
True if any existing period would be swallowed (merge should be blocked)
|
|
||||||
False if safe to proceed with merge evaluation
|
|
||||||
|
|
||||||
"""
|
|
||||||
relaxed_start = relaxed["start"]
|
|
||||||
relaxed_end = relaxed["end"]
|
|
||||||
|
|
||||||
for existing in existing_periods:
|
|
||||||
existing_start = existing["start"]
|
|
||||||
existing_end = existing["end"]
|
|
||||||
|
|
||||||
# Check if relaxed completely contains existing
|
|
||||||
if relaxed_start <= existing_start and relaxed_end >= existing_end:
|
|
||||||
_LOGGER.debug(
|
|
||||||
"Blocking merge: %s-%s would swallow %s-%s (keeping smaller period)",
|
|
||||||
relaxed_start.strftime("%H:%M"),
|
|
||||||
relaxed_end.strftime("%H:%M"),
|
|
||||||
existing_start.strftime("%H:%M"),
|
|
||||||
existing_end.strftime("%H:%M"),
|
|
||||||
)
|
|
||||||
return True
|
|
||||||
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def _is_duplicate_period(relaxed: dict, existing_periods: list[dict], tolerance_seconds: int = 60) -> bool:
|
|
||||||
"""Check if relaxed period is a duplicate of any existing period."""
|
|
||||||
relaxed_start = relaxed["start"]
|
|
||||||
relaxed_end = relaxed["end"]
|
|
||||||
|
|
||||||
for existing in existing_periods:
|
|
||||||
if (
|
|
||||||
abs((relaxed_start - existing["start"]).total_seconds()) < tolerance_seconds
|
|
||||||
and abs((relaxed_end - existing["end"]).total_seconds()) < tolerance_seconds
|
|
||||||
):
|
|
||||||
_LOGGER_DETAILS.debug(
|
|
||||||
"%sSkipping duplicate period %s-%s (already exists)",
|
|
||||||
INDENT_L1,
|
|
||||||
relaxed_start.strftime("%H:%M"),
|
|
||||||
relaxed_end.strftime("%H:%M"),
|
|
||||||
)
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def _find_adjacent_or_overlapping(relaxed: dict, existing_periods: list[dict]) -> list[tuple[int, dict]]:
|
|
||||||
"""Find all periods that are adjacent to or overlapping with the relaxed period."""
|
|
||||||
relaxed_start = relaxed["start"]
|
|
||||||
relaxed_end = relaxed["end"]
|
|
||||||
periods_to_merge = []
|
|
||||||
|
|
||||||
for idx, existing in enumerate(existing_periods):
|
|
||||||
existing_start = existing["start"]
|
|
||||||
existing_end = existing["end"]
|
|
||||||
|
|
||||||
# Check if adjacent (no gap) or overlapping
|
|
||||||
is_adjacent = relaxed_end == existing_start or relaxed_start == existing_end
|
|
||||||
is_overlapping = relaxed_start < existing_end and relaxed_end > existing_start
|
|
||||||
|
|
||||||
if is_adjacent or is_overlapping:
|
|
||||||
periods_to_merge.append((idx, existing))
|
|
||||||
_LOGGER_DETAILS.debug(
|
|
||||||
"%sPeriod %s-%s %s with existing period %s-%s",
|
|
||||||
INDENT_L1,
|
|
||||||
relaxed_start.strftime("%H:%M"),
|
|
||||||
relaxed_end.strftime("%H:%M"),
|
|
||||||
"overlaps" if is_overlapping else "is adjacent to",
|
|
||||||
existing_start.strftime("%H:%M"),
|
|
||||||
existing_end.strftime("%H:%M"),
|
|
||||||
)
|
|
||||||
|
|
||||||
return periods_to_merge
|
|
||||||
|
|
||||||
|
|
||||||
def resolve_period_overlaps(
|
def resolve_period_overlaps(
|
||||||
existing_periods: list[dict],
|
existing_periods: list[dict],
|
||||||
new_relaxed_periods: list[dict],
|
new_relaxed_periods: list[dict],
|
||||||
|
|
@ -278,10 +130,6 @@ def resolve_period_overlaps(
|
||||||
Adjacent or overlapping periods are merged into single continuous periods.
|
Adjacent or overlapping periods are merged into single continuous periods.
|
||||||
The newer period's relaxation attributes override the older period's.
|
The newer period's relaxation attributes override the older period's.
|
||||||
|
|
||||||
Quality Gate: Merging is blocked if the combined period would have
|
|
||||||
an estimated CV above PERIOD_MAX_CV (25%), to prevent creating
|
|
||||||
periods with excessive internal price variation.
|
|
||||||
|
|
||||||
This function is called incrementally after each relaxation phase:
|
This function is called incrementally after each relaxation phase:
|
||||||
- Phase 1: existing = baseline, new = first relaxation
|
- Phase 1: existing = baseline, new = first relaxation
|
||||||
- Phase 2: existing = baseline + phase 1, new = second relaxation
|
- Phase 2: existing = baseline + phase 1, new = second relaxation
|
||||||
|
|
@ -297,7 +145,7 @@ def resolve_period_overlaps(
|
||||||
- new_periods_count: Number of new periods added (some may have been merged)
|
- new_periods_count: Number of new periods added (some may have been merged)
|
||||||
|
|
||||||
"""
|
"""
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER.debug(
|
||||||
"%sresolve_period_overlaps called: existing=%d, new=%d",
|
"%sresolve_period_overlaps called: existing=%d, new=%d",
|
||||||
INDENT_L0,
|
INDENT_L0,
|
||||||
len(existing_periods),
|
len(existing_periods),
|
||||||
|
|
@ -319,60 +167,74 @@ def resolve_period_overlaps(
|
||||||
relaxed_end = relaxed["end"]
|
relaxed_end = relaxed["end"]
|
||||||
|
|
||||||
# Check if this period is duplicate (exact match within tolerance)
|
# Check if this period is duplicate (exact match within tolerance)
|
||||||
if _is_duplicate_period(relaxed, merged):
|
tolerance_seconds = 60 # 1 minute tolerance
|
||||||
continue
|
is_duplicate = False
|
||||||
|
for existing in merged:
|
||||||
|
if (
|
||||||
|
abs((relaxed_start - existing["start"]).total_seconds()) < tolerance_seconds
|
||||||
|
and abs((relaxed_end - existing["end"]).total_seconds()) < tolerance_seconds
|
||||||
|
):
|
||||||
|
is_duplicate = True
|
||||||
|
_LOGGER.debug(
|
||||||
|
"%sSkipping duplicate period %s-%s (already exists)",
|
||||||
|
INDENT_L1,
|
||||||
|
relaxed_start.strftime("%H:%M"),
|
||||||
|
relaxed_end.strftime("%H:%M"),
|
||||||
|
)
|
||||||
|
break
|
||||||
|
|
||||||
# Check if this period would "swallow" an existing smaller period
|
if is_duplicate:
|
||||||
# In that case, skip it - the smaller existing period is more homogeneous
|
|
||||||
if _would_swallow_existing(relaxed, merged):
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Find periods that are adjacent or overlapping (should be merged)
|
# Find periods that are adjacent or overlapping (should be merged)
|
||||||
periods_to_merge = _find_adjacent_or_overlapping(relaxed, merged)
|
periods_to_merge = []
|
||||||
|
for idx, existing in enumerate(merged):
|
||||||
|
existing_start = existing["start"]
|
||||||
|
existing_end = existing["end"]
|
||||||
|
|
||||||
|
# Check if adjacent (no gap) or overlapping
|
||||||
|
is_adjacent = relaxed_end == existing_start or relaxed_start == existing_end
|
||||||
|
is_overlapping = relaxed_start < existing_end and relaxed_end > existing_start
|
||||||
|
|
||||||
|
if is_adjacent or is_overlapping:
|
||||||
|
periods_to_merge.append((idx, existing))
|
||||||
|
_LOGGER.debug(
|
||||||
|
"%sPeriod %s-%s %s with existing period %s-%s",
|
||||||
|
INDENT_L1,
|
||||||
|
relaxed_start.strftime("%H:%M"),
|
||||||
|
relaxed_end.strftime("%H:%M"),
|
||||||
|
"overlaps" if is_overlapping else "is adjacent to",
|
||||||
|
existing_start.strftime("%H:%M"),
|
||||||
|
existing_end.strftime("%H:%M"),
|
||||||
|
)
|
||||||
|
|
||||||
if not periods_to_merge:
|
if not periods_to_merge:
|
||||||
# No merge needed - add as new period
|
# No merge needed - add as new period
|
||||||
merged.append(relaxed)
|
merged.append(relaxed)
|
||||||
periods_added += 1
|
periods_added += 1
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER.debug(
|
||||||
"%sAdded new period %s-%s (no overlap/adjacency)",
|
"%sAdded new period %s-%s (no overlap/adjacency)",
|
||||||
INDENT_L1,
|
INDENT_L1,
|
||||||
relaxed_start.strftime("%H:%M"),
|
relaxed_start.strftime("%H:%M"),
|
||||||
relaxed_end.strftime("%H:%M"),
|
relaxed_end.strftime("%H:%M"),
|
||||||
)
|
)
|
||||||
continue
|
else:
|
||||||
|
# Merge with all adjacent/overlapping periods
|
||||||
|
# Start with the new relaxed period
|
||||||
|
merged_period = relaxed.copy()
|
||||||
|
|
||||||
# Quality Gate: Check if merging would create a period that's too heterogeneous
|
# Remove old periods (in reverse order to maintain indices)
|
||||||
should_merge = _check_merge_quality_gate(periods_to_merge, relaxed)
|
for idx, existing in reversed(periods_to_merge):
|
||||||
|
merged_period = merge_adjacent_periods(existing, merged_period)
|
||||||
|
merged.pop(idx)
|
||||||
|
|
||||||
if not should_merge:
|
# Add the merged result
|
||||||
# Don't merge - add as separate period instead
|
merged.append(merged_period)
|
||||||
merged.append(relaxed)
|
|
||||||
periods_added += 1
|
|
||||||
_LOGGER_DETAILS.debug(
|
|
||||||
"%sAdded new period %s-%s separately (merge blocked by CV gate)",
|
|
||||||
INDENT_L1,
|
|
||||||
relaxed_start.strftime("%H:%M"),
|
|
||||||
relaxed_end.strftime("%H:%M"),
|
|
||||||
)
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Merge with all adjacent/overlapping periods
|
# Count as added if we merged exactly one existing period
|
||||||
# Start with the new relaxed period
|
# (means we extended/merged, not replaced multiple)
|
||||||
merged_period = relaxed.copy()
|
if len(periods_to_merge) == 1:
|
||||||
|
periods_added += 1
|
||||||
# Remove old periods (in reverse order to maintain indices)
|
|
||||||
for idx, existing in reversed(periods_to_merge):
|
|
||||||
merged_period = merge_adjacent_periods(existing, merged_period)
|
|
||||||
merged.pop(idx)
|
|
||||||
|
|
||||||
# Add the merged result
|
|
||||||
merged.append(merged_period)
|
|
||||||
|
|
||||||
# Count as added if we merged exactly one existing period
|
|
||||||
# (means we extended/merged, not replaced multiple)
|
|
||||||
if len(periods_to_merge) == 1:
|
|
||||||
periods_added += 1
|
|
||||||
|
|
||||||
# Sort all periods by start time
|
# Sort all periods by start time
|
||||||
merged.sort(key=lambda p: p["start"])
|
merged.sort(key=lambda p: p["start"])
|
||||||
|
|
|
||||||
|
|
@ -14,18 +14,15 @@ if TYPE_CHECKING:
|
||||||
TibberPricesPeriodStatistics,
|
TibberPricesPeriodStatistics,
|
||||||
TibberPricesThresholdConfig,
|
TibberPricesThresholdConfig,
|
||||||
)
|
)
|
||||||
|
|
||||||
from custom_components.tibber_prices.utils.average import calculate_median
|
|
||||||
from custom_components.tibber_prices.utils.price import (
|
from custom_components.tibber_prices.utils.price import (
|
||||||
aggregate_period_levels,
|
aggregate_period_levels,
|
||||||
aggregate_period_ratings,
|
aggregate_period_ratings,
|
||||||
calculate_coefficient_of_variation,
|
|
||||||
calculate_volatility_level,
|
calculate_volatility_level,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
def calculate_period_price_diff(
|
def calculate_period_price_diff(
|
||||||
price_mean: float,
|
price_avg: float,
|
||||||
start_time: datetime,
|
start_time: datetime,
|
||||||
price_context: dict[str, Any],
|
price_context: dict[str, Any],
|
||||||
) -> tuple[float | None, float | None]:
|
) -> tuple[float | None, float | None]:
|
||||||
|
|
@ -34,11 +31,6 @@ def calculate_period_price_diff(
|
||||||
|
|
||||||
Uses reference price from start day of the period for consistency.
|
Uses reference price from start day of the period for consistency.
|
||||||
|
|
||||||
Args:
|
|
||||||
price_mean: Mean price of the period (in base currency).
|
|
||||||
start_time: Start time of the period.
|
|
||||||
price_context: Dictionary with ref_prices per day.
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Tuple of (period_price_diff, period_price_diff_pct) or (None, None) if no reference available.
|
Tuple of (period_price_diff, period_price_diff_pct) or (None, None) if no reference available.
|
||||||
|
|
||||||
|
|
@ -53,14 +45,14 @@ def calculate_period_price_diff(
|
||||||
if ref_price is None:
|
if ref_price is None:
|
||||||
return None, None
|
return None, None
|
||||||
|
|
||||||
# Both prices are in base currency, no conversion needed
|
# Convert reference price to minor units (ct/øre)
|
||||||
ref_price_display = round(ref_price, 4)
|
ref_price_minor = round(ref_price * 100, 2)
|
||||||
period_price_diff = round(price_mean - ref_price_display, 4)
|
period_price_diff = round(price_avg - ref_price_minor, 2)
|
||||||
period_price_diff_pct = None
|
period_price_diff_pct = None
|
||||||
if ref_price_display != 0:
|
if ref_price_minor != 0:
|
||||||
# CRITICAL: Use abs() for negative prices (same logic as calculate_difference_percentage)
|
# CRITICAL: Use abs() for negative prices (same logic as calculate_difference_percentage)
|
||||||
# Example: avg=-10, ref=-20 → diff=10, pct=10/abs(-20)*100=+50% (correctly shows more expensive)
|
# Example: avg=-10, ref=-20 → diff=10, pct=10/abs(-20)*100=+50% (correctly shows more expensive)
|
||||||
period_price_diff_pct = round((period_price_diff / abs(ref_price_display)) * 100, 2)
|
period_price_diff_pct = round((period_price_diff / abs(ref_price_minor)) * 100, 2)
|
||||||
|
|
||||||
return period_price_diff, period_price_diff_pct
|
return period_price_diff, period_price_diff_pct
|
||||||
|
|
||||||
|
|
@ -90,44 +82,34 @@ def calculate_aggregated_rating_difference(period_price_data: list[dict]) -> flo
|
||||||
return round(sum(differences) / len(differences), 2)
|
return round(sum(differences) / len(differences), 2)
|
||||||
|
|
||||||
|
|
||||||
def calculate_period_price_statistics(
|
def calculate_period_price_statistics(period_price_data: list[dict]) -> dict[str, float]:
|
||||||
period_price_data: list[dict],
|
|
||||||
) -> dict[str, float]:
|
|
||||||
"""
|
"""
|
||||||
Calculate price statistics for a period.
|
Calculate price statistics for a period.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
period_price_data: List of price data dictionaries with "total" field.
|
period_price_data: List of price data dictionaries with "total" field
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Dictionary with price_mean, price_median, price_min, price_max, price_spread (all in base currency).
|
Dictionary with price_avg, price_min, price_max, price_spread (all in minor units: ct/øre)
|
||||||
Note: price_spread is calculated based on price_mean (max - min range as percentage of mean).
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
# Keep prices in base currency (Euro/NOK/SEK) for internal storage
|
prices_minor = [round(float(p["total"]) * 100, 2) for p in period_price_data]
|
||||||
# Conversion to display units (ct/øre) happens in services/formatting layer
|
|
||||||
factor = 1 # Always use base currency for storage
|
|
||||||
prices_display = [round(float(p["total"]) * factor, 4) for p in period_price_data]
|
|
||||||
|
|
||||||
if not prices_display:
|
if not prices_minor:
|
||||||
return {
|
return {
|
||||||
"price_mean": 0.0,
|
"price_avg": 0.0,
|
||||||
"price_median": 0.0,
|
|
||||||
"price_min": 0.0,
|
"price_min": 0.0,
|
||||||
"price_max": 0.0,
|
"price_max": 0.0,
|
||||||
"price_spread": 0.0,
|
"price_spread": 0.0,
|
||||||
}
|
}
|
||||||
|
|
||||||
price_mean = round(sum(prices_display) / len(prices_display), 4)
|
price_avg = round(sum(prices_minor) / len(prices_minor), 2)
|
||||||
median_value = calculate_median(prices_display)
|
price_min = round(min(prices_minor), 2)
|
||||||
price_median = round(median_value, 4) if median_value is not None else 0.0
|
price_max = round(max(prices_minor), 2)
|
||||||
price_min = round(min(prices_display), 4)
|
price_spread = round(price_max - price_min, 2)
|
||||||
price_max = round(max(prices_display), 4)
|
|
||||||
price_spread = round(price_max - price_min, 4)
|
|
||||||
|
|
||||||
return {
|
return {
|
||||||
"price_mean": price_mean,
|
"price_avg": price_avg,
|
||||||
"price_median": price_median,
|
|
||||||
"price_min": price_min,
|
"price_min": price_min,
|
||||||
"price_max": price_max,
|
"price_max": price_max,
|
||||||
"price_spread": price_spread,
|
"price_spread": price_spread,
|
||||||
|
|
@ -165,12 +147,10 @@ def build_period_summary_dict(
|
||||||
"rating_level": stats.aggregated_rating,
|
"rating_level": stats.aggregated_rating,
|
||||||
"rating_difference_%": stats.rating_difference_pct,
|
"rating_difference_%": stats.rating_difference_pct,
|
||||||
# 3. Price statistics (how much does it cost?)
|
# 3. Price statistics (how much does it cost?)
|
||||||
"price_mean": stats.price_mean,
|
"price_avg": stats.price_avg,
|
||||||
"price_median": stats.price_median,
|
|
||||||
"price_min": stats.price_min,
|
"price_min": stats.price_min,
|
||||||
"price_max": stats.price_max,
|
"price_max": stats.price_max,
|
||||||
"price_spread": stats.price_spread,
|
"price_spread": stats.price_spread,
|
||||||
"price_coefficient_variation_%": stats.coefficient_of_variation,
|
|
||||||
"volatility": stats.volatility,
|
"volatility": stats.volatility,
|
||||||
# 4. Price differences will be added below if available
|
# 4. Price differences will be added below if available
|
||||||
# 5. Detail information (additional context)
|
# 5. Detail information (additional context)
|
||||||
|
|
@ -233,7 +213,7 @@ def extract_period_summaries(
|
||||||
|
|
||||||
Returns sensor-ready period summaries with:
|
Returns sensor-ready period summaries with:
|
||||||
- Timestamps and positioning (start, end, hour, minute, time)
|
- Timestamps and positioning (start, end, hour, minute, time)
|
||||||
- Aggregated price statistics (price_mean, price_median, price_min, price_max, price_spread)
|
- Aggregated price statistics (price_avg, price_min, price_max, price_spread)
|
||||||
- Volatility categorization (low/moderate/high/very_high based on coefficient of variation)
|
- Volatility categorization (low/moderate/high/very_high based on coefficient of variation)
|
||||||
- Rating difference percentage (aggregated from intervals)
|
- Rating difference percentage (aggregated from intervals)
|
||||||
- Period price differences (period_price_diff_from_daily_min/max)
|
- Period price differences (period_price_diff_from_daily_min/max)
|
||||||
|
|
@ -243,11 +223,11 @@ def extract_period_summaries(
|
||||||
All data is pre-calculated and ready for display - no further processing needed.
|
All data is pre-calculated and ready for display - no further processing needed.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
periods: List of periods, where each period is a list of interval dictionaries.
|
periods: List of periods, where each period is a list of interval dictionaries
|
||||||
all_prices: All price data from the API (enriched with level, difference, rating_level).
|
all_prices: All price data from the API (enriched with level, difference, rating_level)
|
||||||
price_context: Dictionary with ref_prices and avg_prices per day.
|
price_context: Dictionary with ref_prices and avg_prices per day
|
||||||
thresholds: Threshold configuration for calculations.
|
thresholds: Threshold configuration for calculations
|
||||||
time: TibberPricesTimeService instance (required).
|
time: TibberPricesTimeService instance (required)
|
||||||
|
|
||||||
"""
|
"""
|
||||||
from .types import ( # noqa: PLC0415 - Avoid circular import
|
from .types import ( # noqa: PLC0415 - Avoid circular import
|
||||||
|
|
@ -305,21 +285,18 @@ def extract_period_summaries(
|
||||||
thresholds.threshold_high,
|
thresholds.threshold_high,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Calculate price statistics (in base currency, conversion happens in presentation layer)
|
# Calculate price statistics (in minor units: ct/øre)
|
||||||
price_stats = calculate_period_price_statistics(period_price_data)
|
price_stats = calculate_period_price_statistics(period_price_data)
|
||||||
|
|
||||||
# Calculate period price difference from daily reference
|
# Calculate period price difference from daily reference
|
||||||
period_price_diff, period_price_diff_pct = calculate_period_price_diff(
|
period_price_diff, period_price_diff_pct = calculate_period_price_diff(
|
||||||
price_stats["price_mean"], start_time, price_context
|
price_stats["price_avg"], start_time, price_context
|
||||||
)
|
)
|
||||||
|
|
||||||
# Extract prices for volatility calculation (coefficient of variation)
|
# Extract prices for volatility calculation (coefficient of variation)
|
||||||
prices_for_volatility = [float(p["total"]) for p in period_price_data if "total" in p]
|
prices_for_volatility = [float(p["total"]) for p in period_price_data if "total" in p]
|
||||||
|
|
||||||
# Calculate CV (numeric) for quality gate checks
|
# Calculate volatility (categorical) and aggregated rating difference (numeric)
|
||||||
period_cv = calculate_coefficient_of_variation(prices_for_volatility)
|
|
||||||
|
|
||||||
# Calculate volatility (categorical) using thresholds
|
|
||||||
volatility = calculate_volatility_level(
|
volatility = calculate_volatility_level(
|
||||||
prices_for_volatility,
|
prices_for_volatility,
|
||||||
threshold_moderate=thresholds.threshold_volatility_moderate,
|
threshold_moderate=thresholds.threshold_volatility_moderate,
|
||||||
|
|
@ -347,13 +324,11 @@ def extract_period_summaries(
|
||||||
aggregated_level=aggregated_level,
|
aggregated_level=aggregated_level,
|
||||||
aggregated_rating=aggregated_rating,
|
aggregated_rating=aggregated_rating,
|
||||||
rating_difference_pct=rating_difference_pct,
|
rating_difference_pct=rating_difference_pct,
|
||||||
price_mean=price_stats["price_mean"],
|
price_avg=price_stats["price_avg"],
|
||||||
price_median=price_stats["price_median"],
|
|
||||||
price_min=price_stats["price_min"],
|
price_min=price_stats["price_min"],
|
||||||
price_max=price_stats["price_max"],
|
price_max=price_stats["price_max"],
|
||||||
price_spread=price_stats["price_spread"],
|
price_spread=price_stats["price_spread"],
|
||||||
volatility=volatility,
|
volatility=volatility,
|
||||||
coefficient_of_variation=round(period_cv, 1) if period_cv is not None else None,
|
|
||||||
period_price_diff=period_price_diff,
|
period_price_diff=period_price_diff,
|
||||||
period_price_diff_pct=period_price_diff_pct,
|
period_price_diff_pct=period_price_diff_pct,
|
||||||
)
|
)
|
||||||
|
|
|
||||||
|
|
@ -11,7 +11,7 @@ if TYPE_CHECKING:
|
||||||
|
|
||||||
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
||||||
|
|
||||||
from custom_components.tibber_prices.utils.price import calculate_coefficient_of_variation
|
from .types import TibberPricesPeriodConfig
|
||||||
|
|
||||||
from .period_overlap import (
|
from .period_overlap import (
|
||||||
recalculate_period_metadata,
|
recalculate_period_metadata,
|
||||||
|
|
@ -21,8 +21,6 @@ from .types import (
|
||||||
INDENT_L0,
|
INDENT_L0,
|
||||||
INDENT_L1,
|
INDENT_L1,
|
||||||
INDENT_L2,
|
INDENT_L2,
|
||||||
PERIOD_MAX_CV,
|
|
||||||
TibberPricesPeriodConfig,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
_LOGGER = logging.getLogger(__name__)
|
_LOGGER = logging.getLogger(__name__)
|
||||||
|
|
@ -34,125 +32,6 @@ FLEX_WARNING_THRESHOLD_RELAXATION = 0.25 # 25% - INFO: suggest lowering to 15-2
|
||||||
MAX_FLEX_HARD_LIMIT = 0.50 # 50% - hard maximum flex value
|
MAX_FLEX_HARD_LIMIT = 0.50 # 50% - hard maximum flex value
|
||||||
FLEX_HIGH_THRESHOLD_RELAXATION = 0.30 # 30% - WARNING: base flex too high for relaxation mode
|
FLEX_HIGH_THRESHOLD_RELAXATION = 0.30 # 30% - WARNING: base flex too high for relaxation mode
|
||||||
|
|
||||||
# Min duration fallback constants
|
|
||||||
# When all relaxation phases are exhausted and still no periods found,
|
|
||||||
# gradually reduce min_period_length to find at least something
|
|
||||||
MIN_DURATION_FALLBACK_MINIMUM = 30 # Minimum period length to try (30 min = 2 intervals)
|
|
||||||
MIN_DURATION_FALLBACK_STEP = 15 # Reduce by 15 min (1 interval) each step
|
|
||||||
|
|
||||||
|
|
||||||
def _check_period_quality(
|
|
||||||
period: dict, all_prices: list[dict], *, time: TibberPricesTimeService
|
|
||||||
) -> tuple[bool, float | None]:
|
|
||||||
"""
|
|
||||||
Check if a period passes the quality gate (internal CV not too high).
|
|
||||||
|
|
||||||
The Quality Gate prevents relaxation from creating periods with too much
|
|
||||||
internal price variation. A "best price period" with prices ranging from
|
|
||||||
0.5 to 1.0 kr/kWh is not useful - user can't trust it's actually "best".
|
|
||||||
|
|
||||||
Args:
|
|
||||||
period: Period summary dict with "start" and "end" datetime
|
|
||||||
all_prices: All price intervals (to look up prices for CV calculation)
|
|
||||||
time: Time service for interval time parsing
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Tuple of (passes_quality_gate, cv_value)
|
|
||||||
- passes_quality_gate: True if CV <= PERIOD_MAX_CV
|
|
||||||
- cv_value: Calculated CV as percentage, or None if not calculable
|
|
||||||
|
|
||||||
"""
|
|
||||||
start_time = period.get("start")
|
|
||||||
end_time = period.get("end")
|
|
||||||
|
|
||||||
if not start_time or not end_time:
|
|
||||||
return True, None # Can't check, assume OK
|
|
||||||
|
|
||||||
# Build lookup for prices
|
|
||||||
price_lookup: dict[str, float] = {}
|
|
||||||
for price_data in all_prices:
|
|
||||||
interval_time = time.get_interval_time(price_data)
|
|
||||||
if interval_time:
|
|
||||||
price_lookup[interval_time.isoformat()] = float(price_data["total"])
|
|
||||||
|
|
||||||
# Collect prices within the period
|
|
||||||
period_prices: list[float] = []
|
|
||||||
interval_duration = time.get_interval_duration()
|
|
||||||
|
|
||||||
current = start_time
|
|
||||||
while current < end_time:
|
|
||||||
price = price_lookup.get(current.isoformat())
|
|
||||||
if price is not None:
|
|
||||||
period_prices.append(price)
|
|
||||||
current = current + interval_duration
|
|
||||||
|
|
||||||
# Need at least 2 prices to calculate CV (same as MIN_PRICES_FOR_VOLATILITY in price.py)
|
|
||||||
min_prices_for_cv = 2
|
|
||||||
if len(period_prices) < min_prices_for_cv:
|
|
||||||
return True, None # Too few prices to calculate CV
|
|
||||||
|
|
||||||
cv = calculate_coefficient_of_variation(period_prices)
|
|
||||||
if cv is None:
|
|
||||||
return True, None
|
|
||||||
|
|
||||||
passes = cv <= PERIOD_MAX_CV
|
|
||||||
return passes, cv
|
|
||||||
|
|
||||||
|
|
||||||
def _count_quality_periods(
|
|
||||||
periods: list[dict],
|
|
||||||
all_prices: list[dict],
|
|
||||||
prices_by_day: dict[date, list[dict]],
|
|
||||||
min_periods: int,
|
|
||||||
*,
|
|
||||||
time: TibberPricesTimeService,
|
|
||||||
) -> tuple[int, int]:
|
|
||||||
"""
|
|
||||||
Count days meeting requirement when considering quality gate.
|
|
||||||
|
|
||||||
Only periods passing the quality gate (CV <= PERIOD_MAX_CV) are counted
|
|
||||||
towards meeting the min_periods requirement.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
periods: List of all periods
|
|
||||||
all_prices: All price intervals
|
|
||||||
prices_by_day: Price intervals grouped by day
|
|
||||||
min_periods: Target periods per day
|
|
||||||
time: Time service
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Tuple of (days_meeting_requirement, total_quality_periods)
|
|
||||||
|
|
||||||
"""
|
|
||||||
periods_by_day = group_periods_by_day(periods)
|
|
||||||
days_meeting_requirement = 0
|
|
||||||
total_quality_periods = 0
|
|
||||||
|
|
||||||
for day in sorted(prices_by_day.keys()):
|
|
||||||
day_periods = periods_by_day.get(day, [])
|
|
||||||
quality_count = 0
|
|
||||||
|
|
||||||
for period in day_periods:
|
|
||||||
passes, cv = _check_period_quality(period, all_prices, time=time)
|
|
||||||
if passes:
|
|
||||||
quality_count += 1
|
|
||||||
else:
|
|
||||||
_LOGGER_DETAILS.debug(
|
|
||||||
"%s Day %s: Period %s-%s REJECTED by quality gate (CV=%.1f%% > %.1f%%)",
|
|
||||||
INDENT_L2,
|
|
||||||
day,
|
|
||||||
period.get("start", "?").strftime("%H:%M") if hasattr(period.get("start"), "strftime") else "?",
|
|
||||||
period.get("end", "?").strftime("%H:%M") if hasattr(period.get("end"), "strftime") else "?",
|
|
||||||
cv or 0,
|
|
||||||
PERIOD_MAX_CV,
|
|
||||||
)
|
|
||||||
|
|
||||||
total_quality_periods += quality_count
|
|
||||||
if quality_count >= min_periods:
|
|
||||||
days_meeting_requirement += 1
|
|
||||||
|
|
||||||
return days_meeting_requirement, total_quality_periods
|
|
||||||
|
|
||||||
|
|
||||||
def group_periods_by_day(periods: list[dict]) -> dict[date, list[dict]]:
|
def group_periods_by_day(periods: list[dict]) -> dict[date, list[dict]]:
|
||||||
"""
|
"""
|
||||||
|
|
@ -258,167 +137,7 @@ def group_prices_by_day(all_prices: list[dict], *, time: TibberPricesTimeService
|
||||||
return prices_by_day
|
return prices_by_day
|
||||||
|
|
||||||
|
|
||||||
def _try_min_duration_fallback(
|
def calculate_periods_with_relaxation( # noqa: PLR0913, PLR0915 - Per-day relaxation requires many parameters and statements
|
||||||
*,
|
|
||||||
config: TibberPricesPeriodConfig,
|
|
||||||
existing_periods: list[dict],
|
|
||||||
prices_by_day: dict[date, list[dict]],
|
|
||||||
time: TibberPricesTimeService,
|
|
||||||
) -> tuple[dict[str, Any] | None, dict[str, Any]]:
|
|
||||||
"""
|
|
||||||
Try reducing min_period_length to find periods when relaxation is exhausted.
|
|
||||||
|
|
||||||
This is a LAST RESORT mechanism. It only activates when:
|
|
||||||
1. All relaxation phases have been tried
|
|
||||||
2. Some days STILL have zero periods (not just below min_periods)
|
|
||||||
|
|
||||||
The fallback progressively reduces min_period_length:
|
|
||||||
- 60 min (default) → 45 min → 30 min (minimum)
|
|
||||||
|
|
||||||
It does NOT reduce below 30 min (2 intervals) because a single 15-min
|
|
||||||
interval is essentially just the daily min/max price - not a "period".
|
|
||||||
|
|
||||||
Args:
|
|
||||||
config: Period configuration
|
|
||||||
existing_periods: Periods found so far (from relaxation)
|
|
||||||
prices_by_day: Price intervals grouped by day
|
|
||||||
time: Time service instance
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Tuple of (result dict with periods, metadata dict) or (None, empty metadata)
|
|
||||||
|
|
||||||
"""
|
|
||||||
from .core import calculate_periods # noqa: PLC0415 - Avoid circular import
|
|
||||||
|
|
||||||
metadata: dict[str, Any] = {"phases_used": [], "fallback_active": False}
|
|
||||||
|
|
||||||
# Only try fallback if current min_period_length > minimum
|
|
||||||
if config.min_period_length <= MIN_DURATION_FALLBACK_MINIMUM:
|
|
||||||
return None, metadata
|
|
||||||
|
|
||||||
# Check which days have ZERO periods (not just below target)
|
|
||||||
existing_by_day = group_periods_by_day(existing_periods)
|
|
||||||
days_with_zero_periods = [day for day in prices_by_day if not existing_by_day.get(day)]
|
|
||||||
|
|
||||||
if not days_with_zero_periods:
|
|
||||||
_LOGGER_DETAILS.debug(
|
|
||||||
"%sMin duration fallback: All days have at least one period - no fallback needed",
|
|
||||||
INDENT_L1,
|
|
||||||
)
|
|
||||||
return None, metadata
|
|
||||||
|
|
||||||
_LOGGER.info(
|
|
||||||
"Min duration fallback: %d day(s) have zero periods, trying shorter min_period_length...",
|
|
||||||
len(days_with_zero_periods),
|
|
||||||
)
|
|
||||||
|
|
||||||
# Try progressively shorter min_period_length
|
|
||||||
current_min_duration = config.min_period_length
|
|
||||||
fallback_periods: list[dict] = []
|
|
||||||
|
|
||||||
while current_min_duration > MIN_DURATION_FALLBACK_MINIMUM:
|
|
||||||
current_min_duration = max(
|
|
||||||
current_min_duration - MIN_DURATION_FALLBACK_STEP,
|
|
||||||
MIN_DURATION_FALLBACK_MINIMUM,
|
|
||||||
)
|
|
||||||
|
|
||||||
_LOGGER_DETAILS.debug(
|
|
||||||
"%sTrying min_period_length=%d min for days with zero periods",
|
|
||||||
INDENT_L2,
|
|
||||||
current_min_duration,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Create modified config with shorter min_period_length
|
|
||||||
# Use maxed-out flex (50%) since we're in fallback mode
|
|
||||||
fallback_config = TibberPricesPeriodConfig(
|
|
||||||
reverse_sort=config.reverse_sort,
|
|
||||||
flex=MAX_FLEX_HARD_LIMIT, # Max flex
|
|
||||||
min_distance_from_avg=0, # Disable min_distance in fallback
|
|
||||||
min_period_length=current_min_duration,
|
|
||||||
threshold_low=config.threshold_low,
|
|
||||||
threshold_high=config.threshold_high,
|
|
||||||
threshold_volatility_moderate=config.threshold_volatility_moderate,
|
|
||||||
threshold_volatility_high=config.threshold_volatility_high,
|
|
||||||
threshold_volatility_very_high=config.threshold_volatility_very_high,
|
|
||||||
level_filter=None, # Disable level filter
|
|
||||||
gap_count=config.gap_count,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Try to find periods for days with zero periods
|
|
||||||
for day in days_with_zero_periods:
|
|
||||||
day_prices = prices_by_day.get(day, [])
|
|
||||||
if not day_prices:
|
|
||||||
continue
|
|
||||||
|
|
||||||
try:
|
|
||||||
day_result = calculate_periods(
|
|
||||||
day_prices,
|
|
||||||
config=fallback_config,
|
|
||||||
time=time,
|
|
||||||
)
|
|
||||||
|
|
||||||
day_periods = day_result.get("periods", [])
|
|
||||||
if day_periods:
|
|
||||||
# Mark periods with fallback metadata
|
|
||||||
for period in day_periods:
|
|
||||||
period["duration_fallback_active"] = True
|
|
||||||
period["duration_fallback_min_length"] = current_min_duration
|
|
||||||
period["relaxation_active"] = True
|
|
||||||
period["relaxation_level"] = f"duration_fallback={current_min_duration}min"
|
|
||||||
|
|
||||||
fallback_periods.extend(day_periods)
|
|
||||||
_LOGGER.info(
|
|
||||||
"Min duration fallback: Found %d period(s) for %s at min_length=%d min",
|
|
||||||
len(day_periods),
|
|
||||||
day,
|
|
||||||
current_min_duration,
|
|
||||||
)
|
|
||||||
|
|
||||||
except (KeyError, ValueError, TypeError) as err:
|
|
||||||
_LOGGER.warning(
|
|
||||||
"Error during min duration fallback for %s: %s",
|
|
||||||
day,
|
|
||||||
err,
|
|
||||||
)
|
|
||||||
continue
|
|
||||||
|
|
||||||
# If we found periods for all zero-period days, we can stop
|
|
||||||
if fallback_periods:
|
|
||||||
# Remove days that now have periods from the list
|
|
||||||
fallback_by_day = group_periods_by_day(fallback_periods)
|
|
||||||
days_with_zero_periods = [day for day in days_with_zero_periods if not fallback_by_day.get(day)]
|
|
||||||
|
|
||||||
if not days_with_zero_periods:
|
|
||||||
break
|
|
||||||
|
|
||||||
if fallback_periods:
|
|
||||||
# Merge with existing periods
|
|
||||||
# resolve_period_overlaps merges adjacent/overlapping periods
|
|
||||||
merged_periods, _new_count = resolve_period_overlaps(
|
|
||||||
existing_periods,
|
|
||||||
fallback_periods,
|
|
||||||
)
|
|
||||||
recalculate_period_metadata(merged_periods, time=time)
|
|
||||||
|
|
||||||
metadata["fallback_active"] = True
|
|
||||||
metadata["phases_used"] = [f"duration_fallback (min_length={current_min_duration}min)"]
|
|
||||||
|
|
||||||
_LOGGER.info(
|
|
||||||
"Min duration fallback complete: Added %d period(s), total now %d",
|
|
||||||
len(fallback_periods),
|
|
||||||
len(merged_periods),
|
|
||||||
)
|
|
||||||
|
|
||||||
return {"periods": merged_periods}, metadata
|
|
||||||
|
|
||||||
_LOGGER.warning(
|
|
||||||
"Min duration fallback: Still %d day(s) with zero periods after trying all durations",
|
|
||||||
len(days_with_zero_periods),
|
|
||||||
)
|
|
||||||
return None, metadata
|
|
||||||
|
|
||||||
|
|
||||||
def calculate_periods_with_relaxation( # noqa: PLR0912, PLR0913, PLR0915 - Per-day relaxation requires many parameters and branches
|
|
||||||
all_prices: list[dict],
|
all_prices: list[dict],
|
||||||
*,
|
*,
|
||||||
config: TibberPricesPeriodConfig,
|
config: TibberPricesPeriodConfig,
|
||||||
|
|
@ -427,7 +146,6 @@ def calculate_periods_with_relaxation( # noqa: PLR0912, PLR0913, PLR0915 - Per-
|
||||||
max_relaxation_attempts: int,
|
max_relaxation_attempts: int,
|
||||||
should_show_callback: Callable[[str | None], bool],
|
should_show_callback: Callable[[str | None], bool],
|
||||||
time: TibberPricesTimeService,
|
time: TibberPricesTimeService,
|
||||||
config_entry: Any, # ConfigEntry type
|
|
||||||
) -> dict[str, Any]:
|
) -> dict[str, Any]:
|
||||||
"""
|
"""
|
||||||
Calculate periods with optional per-day filter relaxation.
|
Calculate periods with optional per-day filter relaxation.
|
||||||
|
|
@ -452,8 +170,7 @@ def calculate_periods_with_relaxation( # noqa: PLR0912, PLR0913, PLR0915 - Per-
|
||||||
should_show_callback: Callback function(level_override) -> bool
|
should_show_callback: Callback function(level_override) -> bool
|
||||||
Returns True if periods should be shown with given filter overrides. Pass None
|
Returns True if periods should be shown with given filter overrides. Pass None
|
||||||
to use original configured filter values.
|
to use original configured filter values.
|
||||||
time: TibberPricesTimeService instance (required).
|
time: TibberPricesTimeService instance (required)
|
||||||
config_entry: Config entry to get display unit configuration.
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Dict with same format as calculate_periods() output:
|
Dict with same format as calculate_periods() output:
|
||||||
|
|
@ -466,9 +183,6 @@ def calculate_periods_with_relaxation( # noqa: PLR0912, PLR0913, PLR0915 - Per-
|
||||||
from .core import ( # noqa: PLC0415
|
from .core import ( # noqa: PLC0415
|
||||||
calculate_periods,
|
calculate_periods,
|
||||||
)
|
)
|
||||||
from .period_building import ( # noqa: PLC0415
|
|
||||||
filter_superseded_periods,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Compact INFO-level summary
|
# Compact INFO-level summary
|
||||||
period_type = "PEAK PRICE" if config.reverse_sort else "BEST PRICE"
|
period_type = "PEAK PRICE" if config.reverse_sort else "BEST PRICE"
|
||||||
|
|
@ -560,8 +274,7 @@ def calculate_periods_with_relaxation( # noqa: PLR0912, PLR0913, PLR0915 - Per-
|
||||||
)
|
)
|
||||||
|
|
||||||
# === BASELINE CALCULATION (process ALL prices together, including yesterday) ===
|
# === BASELINE CALCULATION (process ALL prices together, including yesterday) ===
|
||||||
# Periods that ended before yesterday will be filtered out later by filter_periods_by_end_date()
|
# Periods that ended yesterday will be filtered out later by filter_periods_by_end_date()
|
||||||
# This keeps yesterday/today/tomorrow periods in the cache
|
|
||||||
baseline_result = calculate_periods(all_prices, config=config, time=time)
|
baseline_result = calculate_periods(all_prices, config=config, time=time)
|
||||||
all_periods = baseline_result["periods"]
|
all_periods = baseline_result["periods"]
|
||||||
|
|
||||||
|
|
@ -607,7 +320,6 @@ def calculate_periods_with_relaxation( # noqa: PLR0912, PLR0913, PLR0915 - Per-
|
||||||
should_show_callback=should_show_callback,
|
should_show_callback=should_show_callback,
|
||||||
baseline_periods=all_periods,
|
baseline_periods=all_periods,
|
||||||
time=time,
|
time=time,
|
||||||
config_entry=config_entry,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
all_periods = relaxed_result["periods"]
|
all_periods = relaxed_result["periods"]
|
||||||
|
|
@ -622,37 +334,6 @@ def calculate_periods_with_relaxation( # noqa: PLR0912, PLR0913, PLR0915 - Per-
|
||||||
period_count = len(day_periods)
|
period_count = len(day_periods)
|
||||||
if period_count >= min_periods:
|
if period_count >= min_periods:
|
||||||
days_meeting_requirement += 1
|
days_meeting_requirement += 1
|
||||||
|
|
||||||
# === MIN DURATION FALLBACK ===
|
|
||||||
# If still no periods after relaxation, try reducing min_period_length
|
|
||||||
# This is a last resort to ensure users always get SOME period
|
|
||||||
if days_meeting_requirement < total_days and config.min_period_length > MIN_DURATION_FALLBACK_MINIMUM:
|
|
||||||
_LOGGER.info(
|
|
||||||
"Relaxation incomplete (%d/%d days). Trying min_duration fallback...",
|
|
||||||
days_meeting_requirement,
|
|
||||||
total_days,
|
|
||||||
)
|
|
||||||
|
|
||||||
fallback_result, fallback_metadata = _try_min_duration_fallback(
|
|
||||||
config=config,
|
|
||||||
existing_periods=all_periods,
|
|
||||||
prices_by_day=prices_by_day,
|
|
||||||
time=time,
|
|
||||||
)
|
|
||||||
|
|
||||||
if fallback_result:
|
|
||||||
all_periods = fallback_result["periods"]
|
|
||||||
all_phases_used.extend(fallback_metadata.get("phases_used", []))
|
|
||||||
|
|
||||||
# Recount after fallback
|
|
||||||
periods_by_day = group_periods_by_day(all_periods)
|
|
||||||
days_meeting_requirement = 0
|
|
||||||
for day in sorted(prices_by_day.keys()):
|
|
||||||
day_periods = periods_by_day.get(day, [])
|
|
||||||
period_count = len(day_periods)
|
|
||||||
if period_count >= min_periods:
|
|
||||||
days_meeting_requirement += 1
|
|
||||||
|
|
||||||
elif enable_relaxation:
|
elif enable_relaxation:
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"%sAll %d days met target with baseline - no relaxation needed",
|
"%sAll %d days met target with baseline - no relaxation needed",
|
||||||
|
|
@ -666,14 +347,6 @@ def calculate_periods_with_relaxation( # noqa: PLR0912, PLR0913, PLR0915 - Per-
|
||||||
# Recalculate metadata for combined periods
|
# Recalculate metadata for combined periods
|
||||||
recalculate_period_metadata(all_periods, time=time)
|
recalculate_period_metadata(all_periods, time=time)
|
||||||
|
|
||||||
# Apply cross-day supersession filter (only for best-price periods)
|
|
||||||
# This removes late-night today periods that are superseded by better tomorrow alternatives
|
|
||||||
all_periods = filter_superseded_periods(
|
|
||||||
all_periods,
|
|
||||||
time=time,
|
|
||||||
reverse_sort=config.reverse_sort,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Build final result
|
# Build final result
|
||||||
final_result = baseline_result.copy()
|
final_result = baseline_result.copy()
|
||||||
final_result["periods"] = all_periods
|
final_result["periods"] = all_periods
|
||||||
|
|
@ -706,7 +379,6 @@ def relax_all_prices( # noqa: PLR0913 - Comprehensive filter relaxation require
|
||||||
baseline_periods: list[dict],
|
baseline_periods: list[dict],
|
||||||
*,
|
*,
|
||||||
time: TibberPricesTimeService,
|
time: TibberPricesTimeService,
|
||||||
config_entry: Any, # ConfigEntry type
|
|
||||||
) -> tuple[dict[str, Any], dict[str, Any]]:
|
) -> tuple[dict[str, Any], dict[str, Any]]:
|
||||||
"""
|
"""
|
||||||
Relax filters for all prices until min_periods per day is reached.
|
Relax filters for all prices until min_periods per day is reached.
|
||||||
|
|
@ -717,14 +389,13 @@ def relax_all_prices( # noqa: PLR0913 - Comprehensive filter relaxation require
|
||||||
(or max attempts exhausted).
|
(or max attempts exhausted).
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
all_prices: All price intervals (yesterday+today+tomorrow).
|
all_prices: All price intervals (yesterday+today+tomorrow)
|
||||||
config: Base period configuration.
|
config: Base period configuration
|
||||||
min_periods: Target number of periods PER DAY.
|
min_periods: Target number of periods PER DAY
|
||||||
max_relaxation_attempts: Maximum flex levels to try.
|
max_relaxation_attempts: Maximum flex levels to try
|
||||||
should_show_callback: Callback to check if a flex level should be shown.
|
should_show_callback: Callback to check if a flex level should be shown
|
||||||
baseline_periods: Baseline periods (before relaxation).
|
baseline_periods: Baseline periods (before relaxation)
|
||||||
time: TibberPricesTimeService instance.
|
time: TibberPricesTimeService instance
|
||||||
config_entry: Config entry to get display unit configuration.
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Tuple of (result_dict, metadata_dict)
|
Tuple of (result_dict, metadata_dict)
|
||||||
|
|
@ -814,11 +485,23 @@ def relax_all_prices( # noqa: PLR0913 - Comprehensive filter relaxation require
|
||||||
new_relaxed_periods=new_periods,
|
new_relaxed_periods=new_periods,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Count periods per day with QUALITY GATE check
|
# Count periods per day to check if requirement met
|
||||||
# Only periods with CV <= PERIOD_MAX_CV count towards min_periods requirement
|
periods_by_day = group_periods_by_day(combined)
|
||||||
days_meeting_requirement, quality_period_count = _count_quality_periods(
|
days_meeting_requirement = 0
|
||||||
combined, all_prices, prices_by_day, min_periods, time=time
|
|
||||||
)
|
for day in sorted(prices_by_day.keys()):
|
||||||
|
day_periods = periods_by_day.get(day, [])
|
||||||
|
period_count = len(day_periods)
|
||||||
|
if period_count >= min_periods:
|
||||||
|
days_meeting_requirement += 1
|
||||||
|
|
||||||
|
_LOGGER_DETAILS.debug(
|
||||||
|
"%s Day %s: %d periods%s",
|
||||||
|
INDENT_L2,
|
||||||
|
day,
|
||||||
|
period_count,
|
||||||
|
" ✓" if period_count >= min_periods else f" (need {min_periods})",
|
||||||
|
)
|
||||||
|
|
||||||
total_periods = len(combined)
|
total_periods = len(combined)
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
|
|
|
||||||
|
|
@ -15,24 +15,6 @@ from custom_components.tibber_prices.const import (
|
||||||
DEFAULT_VOLATILITY_THRESHOLD_VERY_HIGH,
|
DEFAULT_VOLATILITY_THRESHOLD_VERY_HIGH,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Quality Gate: Maximum coefficient of variation (CV) allowed within a period
|
|
||||||
# Periods with internal CV above this are considered too heterogeneous for "best price"
|
|
||||||
# A 25% CV means the std dev is 25% of the mean - beyond this, prices vary too much
|
|
||||||
# Example: Period with prices 0.7-0.99 kr has ~15% CV which is acceptable
|
|
||||||
# Period with prices 0.5-1.0 kr has ~30% CV which would be rejected
|
|
||||||
PERIOD_MAX_CV = 25.0 # 25% max coefficient of variation within a period
|
|
||||||
|
|
||||||
# Cross-Day Extension: Time window constants
|
|
||||||
# When a period ends late in the day and tomorrow data is available,
|
|
||||||
# we can extend it past midnight if prices remain favorable
|
|
||||||
CROSS_DAY_LATE_PERIOD_START_HOUR = 20 # Consider periods starting at 20:00 or later for extension
|
|
||||||
CROSS_DAY_MAX_EXTENSION_HOUR = 8 # Don't extend beyond 08:00 next day (covers typical night low)
|
|
||||||
|
|
||||||
# Cross-Day Supersession: When tomorrow data arrives, late-night periods that are
|
|
||||||
# worse than early-morning tomorrow periods become obsolete
|
|
||||||
# A today period is "superseded" if tomorrow has a significantly better alternative
|
|
||||||
SUPERSESSION_PRICE_IMPROVEMENT_PCT = 10.0 # Tomorrow must be at least 10% cheaper to supersede
|
|
||||||
|
|
||||||
# Log indentation levels for visual hierarchy
|
# Log indentation levels for visual hierarchy
|
||||||
INDENT_L0 = "" # Top level (calculate_periods_with_relaxation)
|
INDENT_L0 = "" # Top level (calculate_periods_with_relaxation)
|
||||||
INDENT_L1 = " " # Per-day loop
|
INDENT_L1 = " " # Per-day loop
|
||||||
|
|
@ -74,13 +56,11 @@ class TibberPricesPeriodStatistics(NamedTuple):
|
||||||
aggregated_level: str | None
|
aggregated_level: str | None
|
||||||
aggregated_rating: str | None
|
aggregated_rating: str | None
|
||||||
rating_difference_pct: float | None
|
rating_difference_pct: float | None
|
||||||
price_mean: float
|
price_avg: float
|
||||||
price_median: float
|
|
||||||
price_min: float
|
price_min: float
|
||||||
price_max: float
|
price_max: float
|
||||||
price_spread: float
|
price_spread: float
|
||||||
volatility: str
|
volatility: str
|
||||||
coefficient_of_variation: float | None # CV as percentage (e.g., 15.0 for 15%)
|
|
||||||
period_price_diff: float | None
|
period_price_diff: float | None
|
||||||
period_price_diff_pct: float | None
|
period_price_diff_pct: float | None
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -13,8 +13,6 @@ from typing import TYPE_CHECKING, Any
|
||||||
from custom_components.tibber_prices import const as _const
|
from custom_components.tibber_prices import const as _const
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from collections.abc import Callable
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
||||||
from homeassistant.config_entries import ConfigEntry
|
from homeassistant.config_entries import ConfigEntry
|
||||||
|
|
||||||
|
|
@ -34,7 +32,6 @@ class TibberPricesPeriodCalculator:
|
||||||
self,
|
self,
|
||||||
config_entry: ConfigEntry,
|
config_entry: ConfigEntry,
|
||||||
log_prefix: str,
|
log_prefix: str,
|
||||||
get_config_override_fn: Callable[[str, str], Any | None] | None = None,
|
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Initialize the period calculator."""
|
"""Initialize the period calculator."""
|
||||||
self.config_entry = config_entry
|
self.config_entry = config_entry
|
||||||
|
|
@ -42,40 +39,11 @@ class TibberPricesPeriodCalculator:
|
||||||
self.time: TibberPricesTimeService # Set by coordinator before first use
|
self.time: TibberPricesTimeService # Set by coordinator before first use
|
||||||
self._config_cache: dict[str, dict[str, Any]] | None = None
|
self._config_cache: dict[str, dict[str, Any]] | None = None
|
||||||
self._config_cache_valid = False
|
self._config_cache_valid = False
|
||||||
self._get_config_override = get_config_override_fn
|
|
||||||
|
|
||||||
# Period calculation cache
|
# Period calculation cache
|
||||||
self._cached_periods: dict[str, Any] | None = None
|
self._cached_periods: dict[str, Any] | None = None
|
||||||
self._last_periods_hash: str | None = None
|
self._last_periods_hash: str | None = None
|
||||||
|
|
||||||
def _get_option(
|
|
||||||
self,
|
|
||||||
config_key: str,
|
|
||||||
config_section: str,
|
|
||||||
default: Any,
|
|
||||||
) -> Any:
|
|
||||||
"""
|
|
||||||
Get a config option, checking overrides first.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
config_key: The configuration key
|
|
||||||
config_section: The section in options (e.g., "flexibility_settings")
|
|
||||||
default: Default value if not set
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Override value if set, otherwise options value, otherwise default
|
|
||||||
|
|
||||||
"""
|
|
||||||
# Check overrides first
|
|
||||||
if self._get_config_override is not None:
|
|
||||||
override = self._get_config_override(config_key, config_section)
|
|
||||||
if override is not None:
|
|
||||||
return override
|
|
||||||
|
|
||||||
# Fall back to options
|
|
||||||
section = self.config_entry.options.get(config_section, {})
|
|
||||||
return section.get(config_key, default)
|
|
||||||
|
|
||||||
def _log(self, level: str, message: str, *args: object, **kwargs: object) -> None:
|
def _log(self, level: str, message: str, *args: object, **kwargs: object) -> None:
|
||||||
"""Log with calculator-specific prefix."""
|
"""Log with calculator-specific prefix."""
|
||||||
prefixed_message = f"{self._log_prefix} {message}"
|
prefixed_message = f"{self._log_prefix} {message}"
|
||||||
|
|
@ -95,7 +63,7 @@ class TibberPricesPeriodCalculator:
|
||||||
Compute hash of price data and config for period calculation caching.
|
Compute hash of price data and config for period calculation caching.
|
||||||
|
|
||||||
Only includes data that affects period calculation:
|
Only includes data that affects period calculation:
|
||||||
- All interval timestamps and enriched rating levels (yesterday/today/tomorrow)
|
- Today's interval timestamps and enriched rating levels
|
||||||
- Period calculation config (flex, min_distance, min_period_length)
|
- Period calculation config (flex, min_distance, min_period_length)
|
||||||
- Level filter overrides
|
- Level filter overrides
|
||||||
|
|
||||||
|
|
@ -103,20 +71,11 @@ class TibberPricesPeriodCalculator:
|
||||||
Hash string for cache key comparison.
|
Hash string for cache key comparison.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
# Get today and tomorrow intervals for hash calculation
|
# Get relevant price data from flat interval list
|
||||||
# CRITICAL: Only today+tomorrow needed in hash because:
|
# Build minimal coordinator_data structure for get_intervals_for_day_offsets
|
||||||
# 1. Mitternacht: "today" startsAt changes → cache invalidates
|
|
||||||
# 2. Tomorrow arrival: "tomorrow" startsAt changes from None → cache invalidates
|
|
||||||
# 3. Yesterday/day-before-yesterday are static (rating_levels don't change retroactively)
|
|
||||||
# 4. Using first startsAt as representative (changes → entire day changed)
|
|
||||||
coordinator_data = {"priceInfo": price_info}
|
coordinator_data = {"priceInfo": price_info}
|
||||||
today_intervals = get_intervals_for_day_offsets(coordinator_data, [0])
|
today = get_intervals_for_day_offsets(coordinator_data, [0])
|
||||||
tomorrow_intervals = get_intervals_for_day_offsets(coordinator_data, [1])
|
today_signature = tuple((interval.get("startsAt"), interval.get("rating_level")) for interval in today)
|
||||||
|
|
||||||
# Use first startsAt of each day as representative for entire day's data
|
|
||||||
# If day is empty, use None (detects data availability changes)
|
|
||||||
today_start = today_intervals[0].get("startsAt") if today_intervals else None
|
|
||||||
tomorrow_start = tomorrow_intervals[0].get("startsAt") if tomorrow_intervals else None
|
|
||||||
|
|
||||||
# Get period configs (both best and peak)
|
# Get period configs (both best and peak)
|
||||||
best_config = self.get_period_config(reverse_sort=False)
|
best_config = self.get_period_config(reverse_sort=False)
|
||||||
|
|
@ -124,14 +83,12 @@ class TibberPricesPeriodCalculator:
|
||||||
|
|
||||||
# Get level filter overrides from options
|
# Get level filter overrides from options
|
||||||
options = self.config_entry.options
|
options = self.config_entry.options
|
||||||
period_settings = options.get("period_settings", {})
|
best_level_filter = options.get(_const.CONF_BEST_PRICE_MAX_LEVEL, _const.DEFAULT_BEST_PRICE_MAX_LEVEL)
|
||||||
best_level_filter = period_settings.get(_const.CONF_BEST_PRICE_MAX_LEVEL, _const.DEFAULT_BEST_PRICE_MAX_LEVEL)
|
peak_level_filter = options.get(_const.CONF_PEAK_PRICE_MIN_LEVEL, _const.DEFAULT_PEAK_PRICE_MIN_LEVEL)
|
||||||
peak_level_filter = period_settings.get(_const.CONF_PEAK_PRICE_MIN_LEVEL, _const.DEFAULT_PEAK_PRICE_MIN_LEVEL)
|
|
||||||
|
|
||||||
# Compute hash from all relevant data
|
# Compute hash from all relevant data
|
||||||
hash_data = (
|
hash_data = (
|
||||||
today_start, # Representative for today's data (changes at midnight)
|
today_signature,
|
||||||
tomorrow_start, # Representative for tomorrow's data (changes when data arrives)
|
|
||||||
tuple(best_config.items()),
|
tuple(best_config.items()),
|
||||||
tuple(peak_config.items()),
|
tuple(peak_config.items()),
|
||||||
best_level_filter,
|
best_level_filter,
|
||||||
|
|
@ -144,7 +101,7 @@ class TibberPricesPeriodCalculator:
|
||||||
Get period calculation configuration from config options.
|
Get period calculation configuration from config options.
|
||||||
|
|
||||||
Uses cached config to avoid multiple options.get() calls.
|
Uses cached config to avoid multiple options.get() calls.
|
||||||
Cache is invalidated when config_entry.options change or override entities update.
|
Cache is invalidated when config_entry.options change.
|
||||||
"""
|
"""
|
||||||
cache_key = "peak" if reverse_sort else "best"
|
cache_key = "peak" if reverse_sort else "best"
|
||||||
|
|
||||||
|
|
@ -156,45 +113,34 @@ class TibberPricesPeriodCalculator:
|
||||||
if self._config_cache is None:
|
if self._config_cache is None:
|
||||||
self._config_cache = {}
|
self._config_cache = {}
|
||||||
|
|
||||||
# Get config values, checking overrides first
|
options = self.config_entry.options
|
||||||
# CRITICAL: Best/Peak price settings are stored in nested sections:
|
data = self.config_entry.data
|
||||||
# - period_settings: min_period_length, max_level, gap_count
|
|
||||||
# - flexibility_settings: flex, min_distance_from_avg
|
|
||||||
# Override entities can override any of these values at runtime
|
|
||||||
|
|
||||||
if reverse_sort:
|
if reverse_sort:
|
||||||
# Peak price configuration
|
# Peak price configuration
|
||||||
flex = self._get_option(
|
flex = options.get(
|
||||||
_const.CONF_PEAK_PRICE_FLEX,
|
_const.CONF_PEAK_PRICE_FLEX, data.get(_const.CONF_PEAK_PRICE_FLEX, _const.DEFAULT_PEAK_PRICE_FLEX)
|
||||||
"flexibility_settings",
|
|
||||||
_const.DEFAULT_PEAK_PRICE_FLEX,
|
|
||||||
)
|
)
|
||||||
min_distance_from_avg = self._get_option(
|
min_distance_from_avg = options.get(
|
||||||
_const.CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
_const.CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||||
"flexibility_settings",
|
data.get(_const.CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG, _const.DEFAULT_PEAK_PRICE_MIN_DISTANCE_FROM_AVG),
|
||||||
_const.DEFAULT_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
|
||||||
)
|
)
|
||||||
min_period_length = self._get_option(
|
min_period_length = options.get(
|
||||||
_const.CONF_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
_const.CONF_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
||||||
"period_settings",
|
data.get(_const.CONF_PEAK_PRICE_MIN_PERIOD_LENGTH, _const.DEFAULT_PEAK_PRICE_MIN_PERIOD_LENGTH),
|
||||||
_const.DEFAULT_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
# Best price configuration
|
# Best price configuration
|
||||||
flex = self._get_option(
|
flex = options.get(
|
||||||
_const.CONF_BEST_PRICE_FLEX,
|
_const.CONF_BEST_PRICE_FLEX, data.get(_const.CONF_BEST_PRICE_FLEX, _const.DEFAULT_BEST_PRICE_FLEX)
|
||||||
"flexibility_settings",
|
|
||||||
_const.DEFAULT_BEST_PRICE_FLEX,
|
|
||||||
)
|
)
|
||||||
min_distance_from_avg = self._get_option(
|
min_distance_from_avg = options.get(
|
||||||
_const.CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG,
|
_const.CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||||
"flexibility_settings",
|
data.get(_const.CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG, _const.DEFAULT_BEST_PRICE_MIN_DISTANCE_FROM_AVG),
|
||||||
_const.DEFAULT_BEST_PRICE_MIN_DISTANCE_FROM_AVG,
|
|
||||||
)
|
)
|
||||||
min_period_length = self._get_option(
|
min_period_length = options.get(
|
||||||
_const.CONF_BEST_PRICE_MIN_PERIOD_LENGTH,
|
_const.CONF_BEST_PRICE_MIN_PERIOD_LENGTH,
|
||||||
"period_settings",
|
data.get(_const.CONF_BEST_PRICE_MIN_PERIOD_LENGTH, _const.DEFAULT_BEST_PRICE_MIN_PERIOD_LENGTH),
|
||||||
_const.DEFAULT_BEST_PRICE_MIN_PERIOD_LENGTH,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
# Convert flex from percentage to decimal (e.g., 5 -> 0.05)
|
# Convert flex from percentage to decimal (e.g., 5 -> 0.05)
|
||||||
|
|
@ -400,14 +346,13 @@ class TibberPricesPeriodCalculator:
|
||||||
|
|
||||||
# Normal check failed - try splitting at gap clusters as fallback
|
# Normal check failed - try splitting at gap clusters as fallback
|
||||||
# Get minimum period length from config (convert minutes to intervals)
|
# Get minimum period length from config (convert minutes to intervals)
|
||||||
period_settings = self.config_entry.options.get("period_settings", {})
|
|
||||||
if reverse_sort:
|
if reverse_sort:
|
||||||
min_period_minutes = period_settings.get(
|
min_period_minutes = self.config_entry.options.get(
|
||||||
_const.CONF_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
_const.CONF_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
||||||
_const.DEFAULT_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
_const.DEFAULT_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
min_period_minutes = period_settings.get(
|
min_period_minutes = self.config_entry.options.get(
|
||||||
_const.CONF_BEST_PRICE_MIN_PERIOD_LENGTH,
|
_const.CONF_BEST_PRICE_MIN_PERIOD_LENGTH,
|
||||||
_const.DEFAULT_BEST_PRICE_MIN_PERIOD_LENGTH,
|
_const.DEFAULT_BEST_PRICE_MIN_PERIOD_LENGTH,
|
||||||
)
|
)
|
||||||
|
|
@ -532,15 +477,13 @@ class TibberPricesPeriodCalculator:
|
||||||
# Get appropriate config based on sensor type
|
# Get appropriate config based on sensor type
|
||||||
elif reverse_sort:
|
elif reverse_sort:
|
||||||
# Peak price: minimum level filter (lower bound)
|
# Peak price: minimum level filter (lower bound)
|
||||||
period_settings = self.config_entry.options.get("period_settings", {})
|
level_config = self.config_entry.options.get(
|
||||||
level_config = period_settings.get(
|
|
||||||
_const.CONF_PEAK_PRICE_MIN_LEVEL,
|
_const.CONF_PEAK_PRICE_MIN_LEVEL,
|
||||||
_const.DEFAULT_PEAK_PRICE_MIN_LEVEL,
|
_const.DEFAULT_PEAK_PRICE_MIN_LEVEL,
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
# Best price: maximum level filter (upper bound)
|
# Best price: maximum level filter (upper bound)
|
||||||
period_settings = self.config_entry.options.get("period_settings", {})
|
level_config = self.config_entry.options.get(
|
||||||
level_config = period_settings.get(
|
|
||||||
_const.CONF_BEST_PRICE_MAX_LEVEL,
|
_const.CONF_BEST_PRICE_MAX_LEVEL,
|
||||||
_const.DEFAULT_BEST_PRICE_MAX_LEVEL,
|
_const.DEFAULT_BEST_PRICE_MAX_LEVEL,
|
||||||
)
|
)
|
||||||
|
|
@ -558,14 +501,13 @@ class TibberPricesPeriodCalculator:
|
||||||
return True # If no data, don't filter
|
return True # If no data, don't filter
|
||||||
|
|
||||||
# Get gap tolerance configuration
|
# Get gap tolerance configuration
|
||||||
period_settings = self.config_entry.options.get("period_settings", {})
|
|
||||||
if reverse_sort:
|
if reverse_sort:
|
||||||
max_gap_count = period_settings.get(
|
max_gap_count = self.config_entry.options.get(
|
||||||
_const.CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
_const.CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||||
_const.DEFAULT_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
_const.DEFAULT_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
max_gap_count = period_settings.get(
|
max_gap_count = self.config_entry.options.get(
|
||||||
_const.CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
_const.CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||||
_const.DEFAULT_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
_const.DEFAULT_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||||
)
|
)
|
||||||
|
|
@ -616,14 +558,15 @@ class TibberPricesPeriodCalculator:
|
||||||
|
|
||||||
self._log("debug", "Calculating periods (cache miss or hash mismatch)")
|
self._log("debug", "Calculating periods (cache miss or hash mismatch)")
|
||||||
|
|
||||||
# Get all intervals at once (day before yesterday + yesterday + today + tomorrow)
|
# Get intervals by day from flat list
|
||||||
# CRITICAL: 4 days ensure stable historical period calculations
|
# Build minimal coordinator_data structure for get_intervals_for_day_offsets
|
||||||
# (periods calculated today for yesterday match periods calculated yesterday)
|
|
||||||
coordinator_data = {"priceInfo": price_info}
|
coordinator_data = {"priceInfo": price_info}
|
||||||
all_prices = get_intervals_for_day_offsets(coordinator_data, [-2, -1, 0, 1])
|
yesterday_prices = get_intervals_for_day_offsets(coordinator_data, [-1])
|
||||||
|
today_prices = get_intervals_for_day_offsets(coordinator_data, [0])
|
||||||
|
tomorrow_prices = get_intervals_for_day_offsets(coordinator_data, [1])
|
||||||
|
all_prices = yesterday_prices + today_prices + tomorrow_prices
|
||||||
|
|
||||||
# Get rating thresholds from config (flat in options, not in sections)
|
# Get rating thresholds from config
|
||||||
# CRITICAL: Price rating thresholds are stored FLAT in options (no sections)
|
|
||||||
threshold_low = self.config_entry.options.get(
|
threshold_low = self.config_entry.options.get(
|
||||||
_const.CONF_PRICE_RATING_THRESHOLD_LOW,
|
_const.CONF_PRICE_RATING_THRESHOLD_LOW,
|
||||||
_const.DEFAULT_PRICE_RATING_THRESHOLD_LOW,
|
_const.DEFAULT_PRICE_RATING_THRESHOLD_LOW,
|
||||||
|
|
@ -633,8 +576,7 @@ class TibberPricesPeriodCalculator:
|
||||||
_const.DEFAULT_PRICE_RATING_THRESHOLD_HIGH,
|
_const.DEFAULT_PRICE_RATING_THRESHOLD_HIGH,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Get volatility thresholds from config (flat in options, not in sections)
|
# Get volatility thresholds from config
|
||||||
# CRITICAL: Volatility thresholds are stored FLAT in options (no sections)
|
|
||||||
threshold_volatility_moderate = self.config_entry.options.get(
|
threshold_volatility_moderate = self.config_entry.options.get(
|
||||||
_const.CONF_VOLATILITY_THRESHOLD_MODERATE,
|
_const.CONF_VOLATILITY_THRESHOLD_MODERATE,
|
||||||
_const.DEFAULT_VOLATILITY_THRESHOLD_MODERATE,
|
_const.DEFAULT_VOLATILITY_THRESHOLD_MODERATE,
|
||||||
|
|
@ -649,11 +591,8 @@ class TibberPricesPeriodCalculator:
|
||||||
)
|
)
|
||||||
|
|
||||||
# Get relaxation configuration for best price
|
# Get relaxation configuration for best price
|
||||||
# CRITICAL: Relaxation settings are stored in nested section 'relaxation_and_target_periods'
|
enable_relaxation_best = self.config_entry.options.get(
|
||||||
# Override entities can override any of these values at runtime
|
|
||||||
enable_relaxation_best = self._get_option(
|
|
||||||
_const.CONF_ENABLE_MIN_PERIODS_BEST,
|
_const.CONF_ENABLE_MIN_PERIODS_BEST,
|
||||||
"relaxation_and_target_periods",
|
|
||||||
_const.DEFAULT_ENABLE_MIN_PERIODS_BEST,
|
_const.DEFAULT_ENABLE_MIN_PERIODS_BEST,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -664,30 +603,25 @@ class TibberPricesPeriodCalculator:
|
||||||
show_best_price = bool(all_prices)
|
show_best_price = bool(all_prices)
|
||||||
else:
|
else:
|
||||||
show_best_price = self.should_show_periods(price_info, reverse_sort=False) if all_prices else False
|
show_best_price = self.should_show_periods(price_info, reverse_sort=False) if all_prices else False
|
||||||
min_periods_best = self._get_option(
|
min_periods_best = self.config_entry.options.get(
|
||||||
_const.CONF_MIN_PERIODS_BEST,
|
_const.CONF_MIN_PERIODS_BEST,
|
||||||
"relaxation_and_target_periods",
|
|
||||||
_const.DEFAULT_MIN_PERIODS_BEST,
|
_const.DEFAULT_MIN_PERIODS_BEST,
|
||||||
)
|
)
|
||||||
relaxation_attempts_best = self._get_option(
|
relaxation_attempts_best = self.config_entry.options.get(
|
||||||
_const.CONF_RELAXATION_ATTEMPTS_BEST,
|
_const.CONF_RELAXATION_ATTEMPTS_BEST,
|
||||||
"relaxation_and_target_periods",
|
|
||||||
_const.DEFAULT_RELAXATION_ATTEMPTS_BEST,
|
_const.DEFAULT_RELAXATION_ATTEMPTS_BEST,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Calculate best price periods (or return empty if filtered)
|
# Calculate best price periods (or return empty if filtered)
|
||||||
if show_best_price:
|
if show_best_price:
|
||||||
best_config = self.get_period_config(reverse_sort=False)
|
best_config = self.get_period_config(reverse_sort=False)
|
||||||
# Get level filter configuration from period_settings section
|
# Get level filter configuration
|
||||||
# CRITICAL: max_level and gap_count are stored in nested section 'period_settings'
|
max_level_best = self.config_entry.options.get(
|
||||||
max_level_best = self._get_option(
|
|
||||||
_const.CONF_BEST_PRICE_MAX_LEVEL,
|
_const.CONF_BEST_PRICE_MAX_LEVEL,
|
||||||
"period_settings",
|
|
||||||
_const.DEFAULT_BEST_PRICE_MAX_LEVEL,
|
_const.DEFAULT_BEST_PRICE_MAX_LEVEL,
|
||||||
)
|
)
|
||||||
gap_count_best = self._get_option(
|
gap_count_best = self.config_entry.options.get(
|
||||||
_const.CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
_const.CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||||
"period_settings",
|
|
||||||
_const.DEFAULT_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
_const.DEFAULT_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||||
)
|
)
|
||||||
best_period_config = TibberPricesPeriodConfig(
|
best_period_config = TibberPricesPeriodConfig(
|
||||||
|
|
@ -715,7 +649,6 @@ class TibberPricesPeriodCalculator:
|
||||||
level_override=lvl,
|
level_override=lvl,
|
||||||
),
|
),
|
||||||
time=self.time,
|
time=self.time,
|
||||||
config_entry=self.config_entry,
|
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
best_periods = {
|
best_periods = {
|
||||||
|
|
@ -730,11 +663,8 @@ class TibberPricesPeriodCalculator:
|
||||||
}
|
}
|
||||||
|
|
||||||
# Get relaxation configuration for peak price
|
# Get relaxation configuration for peak price
|
||||||
# CRITICAL: Relaxation settings are stored in nested section 'relaxation_and_target_periods'
|
enable_relaxation_peak = self.config_entry.options.get(
|
||||||
# Override entities can override any of these values at runtime
|
|
||||||
enable_relaxation_peak = self._get_option(
|
|
||||||
_const.CONF_ENABLE_MIN_PERIODS_PEAK,
|
_const.CONF_ENABLE_MIN_PERIODS_PEAK,
|
||||||
"relaxation_and_target_periods",
|
|
||||||
_const.DEFAULT_ENABLE_MIN_PERIODS_PEAK,
|
_const.DEFAULT_ENABLE_MIN_PERIODS_PEAK,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -745,30 +675,25 @@ class TibberPricesPeriodCalculator:
|
||||||
show_peak_price = bool(all_prices)
|
show_peak_price = bool(all_prices)
|
||||||
else:
|
else:
|
||||||
show_peak_price = self.should_show_periods(price_info, reverse_sort=True) if all_prices else False
|
show_peak_price = self.should_show_periods(price_info, reverse_sort=True) if all_prices else False
|
||||||
min_periods_peak = self._get_option(
|
min_periods_peak = self.config_entry.options.get(
|
||||||
_const.CONF_MIN_PERIODS_PEAK,
|
_const.CONF_MIN_PERIODS_PEAK,
|
||||||
"relaxation_and_target_periods",
|
|
||||||
_const.DEFAULT_MIN_PERIODS_PEAK,
|
_const.DEFAULT_MIN_PERIODS_PEAK,
|
||||||
)
|
)
|
||||||
relaxation_attempts_peak = self._get_option(
|
relaxation_attempts_peak = self.config_entry.options.get(
|
||||||
_const.CONF_RELAXATION_ATTEMPTS_PEAK,
|
_const.CONF_RELAXATION_ATTEMPTS_PEAK,
|
||||||
"relaxation_and_target_periods",
|
|
||||||
_const.DEFAULT_RELAXATION_ATTEMPTS_PEAK,
|
_const.DEFAULT_RELAXATION_ATTEMPTS_PEAK,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Calculate peak price periods (or return empty if filtered)
|
# Calculate peak price periods (or return empty if filtered)
|
||||||
if show_peak_price:
|
if show_peak_price:
|
||||||
peak_config = self.get_period_config(reverse_sort=True)
|
peak_config = self.get_period_config(reverse_sort=True)
|
||||||
# Get level filter configuration from period_settings section
|
# Get level filter configuration
|
||||||
# CRITICAL: min_level and gap_count are stored in nested section 'period_settings'
|
min_level_peak = self.config_entry.options.get(
|
||||||
min_level_peak = self._get_option(
|
|
||||||
_const.CONF_PEAK_PRICE_MIN_LEVEL,
|
_const.CONF_PEAK_PRICE_MIN_LEVEL,
|
||||||
"period_settings",
|
|
||||||
_const.DEFAULT_PEAK_PRICE_MIN_LEVEL,
|
_const.DEFAULT_PEAK_PRICE_MIN_LEVEL,
|
||||||
)
|
)
|
||||||
gap_count_peak = self._get_option(
|
gap_count_peak = self.config_entry.options.get(
|
||||||
_const.CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
_const.CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||||
"period_settings",
|
|
||||||
_const.DEFAULT_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
_const.DEFAULT_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||||
)
|
)
|
||||||
peak_period_config = TibberPricesPeriodConfig(
|
peak_period_config = TibberPricesPeriodConfig(
|
||||||
|
|
@ -796,7 +721,6 @@ class TibberPricesPeriodCalculator:
|
||||||
level_override=lvl,
|
level_override=lvl,
|
||||||
),
|
),
|
||||||
time=self.time,
|
time=self.time,
|
||||||
config_entry=self.config_entry,
|
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
peak_periods = {
|
peak_periods = {
|
||||||
|
|
|
||||||
|
|
@ -1,631 +0,0 @@
|
||||||
"""
|
|
||||||
Price data management for the coordinator.
|
|
||||||
|
|
||||||
This module manages all price-related data for the Tibber Prices integration:
|
|
||||||
|
|
||||||
**User Data** (fetched directly via API):
|
|
||||||
- Home metadata (name, address, timezone)
|
|
||||||
- Account info (subscription status)
|
|
||||||
- Currency settings
|
|
||||||
- Refreshed daily (24h interval)
|
|
||||||
|
|
||||||
**Price Data** (fetched via IntervalPool):
|
|
||||||
- Quarter-hourly price intervals
|
|
||||||
- Yesterday/today/tomorrow coverage
|
|
||||||
- The IntervalPool handles actual API fetching, deduplication, and caching
|
|
||||||
- This manager coordinates the data flow and user data refresh
|
|
||||||
|
|
||||||
Data flow:
|
|
||||||
Tibber API → IntervalPool → PriceDataManager → Coordinator → Sensors
|
|
||||||
↑ ↓
|
|
||||||
(actual fetching) (orchestration + user data)
|
|
||||||
|
|
||||||
Note: Price data is NOT cached in this module - IntervalPool is the single
|
|
||||||
source of truth. This module only caches user_data for daily refresh cycle.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
import logging
|
|
||||||
from datetime import timedelta
|
|
||||||
from typing import TYPE_CHECKING, Any
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.api import (
|
|
||||||
TibberPricesApiClientAuthenticationError,
|
|
||||||
TibberPricesApiClientCommunicationError,
|
|
||||||
TibberPricesApiClientError,
|
|
||||||
)
|
|
||||||
from homeassistant.exceptions import ConfigEntryAuthFailed
|
|
||||||
from homeassistant.helpers.update_coordinator import UpdateFailed
|
|
||||||
|
|
||||||
from . import cache, helpers
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
from collections.abc import Callable
|
|
||||||
from datetime import datetime
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.api import TibberPricesApiClient
|
|
||||||
from custom_components.tibber_prices.interval_pool import TibberPricesIntervalPool
|
|
||||||
|
|
||||||
from .time_service import TibberPricesTimeService
|
|
||||||
|
|
||||||
_LOGGER = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
# Hour when Tibber publishes tomorrow's prices (around 13:00 local time)
|
|
||||||
# Before this hour, requesting tomorrow data will always fail → wasted API call
|
|
||||||
TOMORROW_DATA_AVAILABLE_HOUR = 13
|
|
||||||
|
|
||||||
|
|
||||||
class TibberPricesPriceDataManager:
    """
    Manages price and user data for the coordinator.

    Responsibilities:
    - User data: Fetches directly via API, validates, caches with persistence
    - Price data: Coordinates with IntervalPool (which does actual API fetching)
    - Cache management: Loads/stores both data types to HA persistent storage
    - Update decisions: Determines when fresh data is needed

    Note: Despite the name, this class does NOT do the actual price fetching.
    The IntervalPool handles API calls, deduplication, and interval management.
    This class orchestrates WHEN to fetch and processes the results.
    """

    def __init__(  # noqa: PLR0913
        self,
        api: TibberPricesApiClient,
        store: Any,
        log_prefix: str,
        user_update_interval: timedelta,
        time: TibberPricesTimeService,
        home_id: str,
        interval_pool: TibberPricesIntervalPool,
    ) -> None:
        """
        Initialize the price data manager.

        Args:
            api: API client for direct requests (user data only).
            store: Home Assistant storage for persistence.
            log_prefix: Prefix for log messages (e.g., "[Home Name]").
            user_update_interval: How often to refresh user data (default: 1 day).
            time: TimeService for time operations.
            home_id: Home ID this manager is responsible for.
            interval_pool: IntervalPool for price data (handles actual fetching).

        """
        self.api = api
        self._store = store
        self._log_prefix = log_prefix
        self._user_update_interval = user_update_interval
        self.time: TibberPricesTimeService = time
        self.home_id = home_id
        self._interval_pool = interval_pool

        # Cached data (user data only - price data is in IntervalPool)
        self._cached_user_data: dict[str, Any] | None = None
        self._last_user_update: datetime | None = None

    def _log(self, level: str, message: str, *args: object, **kwargs: object) -> None:
        """Log with coordinator-specific prefix."""
        prefixed_message = f"{self._log_prefix} {message}"
        # Resolve the logger method dynamically (e.g. "debug" -> _LOGGER.debug).
        getattr(_LOGGER, level)(prefixed_message, *args, **kwargs)

    async def load_cache(self) -> None:
        """Load cached user data from storage (price data is in IntervalPool)."""
        cache_data = await cache.load_cache(self._store, self._log_prefix, time=self.time)

        self._cached_user_data = cache_data.user_data
        self._last_user_update = cache_data.last_user_update

    def should_fetch_tomorrow_data(
        self,
        current_price_info: list[dict[str, Any]] | None,
    ) -> bool:
        """
        Determine if tomorrow's data should be requested from the API.

        This is the key intelligence that prevents API spam:
        - Tibber publishes tomorrow's prices around 13:00 each day
        - Before 13:00, requesting tomorrow data will always fail → wasted API call
        - If we already have tomorrow data, no need to request it again

        The decision logic:
        1. Before 13:00 local time → Don't fetch (data not available yet)
        2. After 13:00 AND tomorrow data already present → Don't fetch (already have it)
        3. After 13:00 AND tomorrow data missing → Fetch (data should be available)

        Args:
            current_price_info: List of price intervals from current coordinator data.
                Used to check if tomorrow data already exists.

        Returns:
            True if tomorrow data should be requested, False otherwise.

        """
        # Hour comparison is done in local time because Tibber's publication
        # schedule (around 13:00) is a local-time event.
        now = self.time.now()
        now_local = self.time.as_local(now)
        current_hour = now_local.hour

        # Before TOMORROW_DATA_AVAILABLE_HOUR - tomorrow data not available yet
        if current_hour < TOMORROW_DATA_AVAILABLE_HOUR:
            self._log("debug", "Before %d:00 - not requesting tomorrow data", TOMORROW_DATA_AVAILABLE_HOUR)
            return False

        # After TOMORROW_DATA_AVAILABLE_HOUR - check if we already have tomorrow data
        if current_price_info:
            has_tomorrow = self.has_tomorrow_data(current_price_info)
            if has_tomorrow:
                self._log(
                    "debug", "After %d:00 but already have tomorrow data - not requesting", TOMORROW_DATA_AVAILABLE_HOUR
                )
                return False
            self._log("debug", "After %d:00 and tomorrow data missing - will request", TOMORROW_DATA_AVAILABLE_HOUR)
            return True

        # No current data - request tomorrow data if after TOMORROW_DATA_AVAILABLE_HOUR
        self._log(
            "debug", "After %d:00 with no current data - will request tomorrow data", TOMORROW_DATA_AVAILABLE_HOUR
        )
        return True

    async def store_cache(self, last_midnight_check: datetime | None = None) -> None:
        """Store cache data (user metadata only, price data is in IntervalPool)."""
        cache_data = cache.TibberPricesCacheData(
            user_data=self._cached_user_data,
            last_user_update=self._last_user_update,
            last_midnight_check=last_midnight_check,
        )
        await cache.save_cache(self._store, cache_data, self._log_prefix)

    def _validate_user_data(self, user_data: dict, home_id: str) -> bool:  # noqa: PLR0911
        """
        Validate user data completeness.

        Rejects incomplete/invalid data from API to prevent caching temporary errors.
        Currency information is critical - if missing, we cannot safely calculate prices.

        Args:
            user_data: User data dict from API.
            home_id: Home ID to validate against.

        Returns:
            True if data is valid and complete, False otherwise.

        """
        if not user_data:
            self._log("warning", "User data validation failed: Empty data")
            return False

        viewer = user_data.get("viewer")
        if not viewer or not isinstance(viewer, dict):
            self._log("warning", "User data validation failed: Missing or invalid viewer")
            return False

        homes = viewer.get("homes")
        if not homes or not isinstance(homes, list) or len(homes) == 0:
            self._log("warning", "User data validation failed: No homes found")
            return False

        # Find our home and validate it has required data
        home_found = False
        for home in homes:
            if home.get("id") == home_id:
                home_found = True

                # Validate home has timezone (required for cursor calculation)
                if not home.get("timeZone"):
                    self._log("warning", "User data validation failed: Home %s missing timezone", home_id)
                    return False

                # Currency is REQUIRED - we cannot function without it
                # The currency is nested in currentSubscription.priceInfo.current.currency
                subscription = home.get("currentSubscription")
                if not subscription:
                    self._log(
                        "warning",
                        "User data validation failed: Home %s has no active subscription",
                        home_id,
                    )
                    return False

                price_info = subscription.get("priceInfo")
                if not price_info:
                    self._log(
                        "warning",
                        "User data validation failed: Home %s subscription has no priceInfo",
                        home_id,
                    )
                    return False

                current = price_info.get("current")
                if not current:
                    self._log(
                        "warning",
                        "User data validation failed: Home %s priceInfo has no current data",
                        home_id,
                    )
                    return False

                currency = current.get("currency")
                if not currency:
                    self._log(
                        "warning",
                        "User data validation failed: Home %s has no currency",
                        home_id,
                    )
                    return False

                break

        if not home_found:
            self._log("warning", "User data validation failed: Home %s not found in homes list", home_id)
            return False

        self._log("debug", "User data validation passed for home %s", home_id)
        return True

    async def update_user_data_if_needed(self, current_time: datetime) -> bool:
        """
        Update user data if needed (daily check).

        Only accepts complete and valid data. If API returns incomplete data
        (e.g., during maintenance), keeps existing cached data and retries later.

        Returns:
            True if user data was updated, False otherwise

        """
        if self._last_user_update is None or current_time - self._last_user_update >= self._user_update_interval:
            try:
                self._log("debug", "Updating user data")
                user_data = await self.api.async_get_viewer_details()

                # Validate before caching
                if not self._validate_user_data(user_data, self.home_id):
                    self._log(
                        "warning",
                        "Rejecting incomplete user data from API - keeping existing cached data",
                    )
                    return False  # Keep existing data, don't update timestamp

                # Data is valid, cache it
                self._cached_user_data = user_data
                self._last_user_update = current_time
                self._log("debug", "User data updated successfully")
            except (
                TibberPricesApiClientError,
                TibberPricesApiClientCommunicationError,
            ) as ex:
                self._log("warning", "Failed to update user data: %s", ex)
                return False  # Update failed
            else:
                return True  # User data was updated
        return False  # No update needed

    async def fetch_home_data(
        self,
        home_id: str,
        current_time: datetime,
        *,
        include_tomorrow: bool = True,
    ) -> tuple[dict[str, Any], bool]:
        """
        Fetch data for a single home via pool.

        Args:
            home_id: Home ID to fetch data for.
            current_time: Current time for timestamp in result.
            include_tomorrow: If True, request tomorrow's data too. If False,
                              only request up to end of today.

        Returns:
            Tuple of (data_dict, api_called):
            - data_dict: Dictionary with timestamp, home_id, price_info, currency.
            - api_called: True if API was called to fetch missing data.

        """
        if not home_id:
            self._log("warning", "No home ID provided - cannot fetch price data")
            # Return an empty-but-well-formed payload so callers can proceed.
            return (
                {
                    "timestamp": current_time,
                    "home_id": "",
                    "price_info": [],
                    "currency": "EUR",
                },
                False,  # No API call made
            )

        # Ensure we have user_data before fetching price data
        # This is critical for timezone-aware cursor calculation
        if not self._cached_user_data:
            self._log("info", "User data not cached, fetching before price data")
            try:
                user_data = await self.api.async_get_viewer_details()

                # Validate data before accepting it (especially on initial setup)
                if not self._validate_user_data(user_data, self.home_id):
                    msg = "Received incomplete user data from API - cannot proceed with price fetching"
                    self._log("error", msg)
                    raise TibberPricesApiClientError(msg)  # noqa: TRY301

                self._cached_user_data = user_data
                self._last_user_update = current_time
            except (
                TibberPricesApiClientError,
                TibberPricesApiClientCommunicationError,
            ) as ex:
                msg = f"Failed to fetch user data (required for price fetching): {ex}"
                self._log("error", msg)
                raise TibberPricesApiClientError(msg) from ex

        # At this point, _cached_user_data is guaranteed to be not None (checked above)
        if not self._cached_user_data:
            msg = "User data unexpectedly None after fetch attempt"
            raise TibberPricesApiClientError(msg)

        # Retrieve price data via IntervalPool (single source of truth)
        price_info, api_called = await self._fetch_via_pool(home_id, include_tomorrow=include_tomorrow)

        # Extract currency for this home from user_data
        currency = self._get_currency_for_home(home_id)

        self._log(
            "debug",
            "Successfully fetched data for home %s (%d intervals, api_called=%s)",
            home_id,
            len(price_info),
            api_called,
        )

        return (
            {
                "timestamp": current_time,
                "home_id": home_id,
                "price_info": price_info,
                "currency": currency,
            },
            api_called,
        )

    async def _fetch_via_pool(
        self,
        home_id: str,
        *,
        include_tomorrow: bool = True,
    ) -> tuple[list[dict[str, Any]], bool]:
        """
        Retrieve price data via IntervalPool.

        The IntervalPool is the single source of truth for price data:
        - Handles actual API calls to Tibber
        - Manages deduplication and caching
        - Provides intervals from day-before-yesterday to end-of-today/tomorrow

        This method delegates to the Pool's get_sensor_data() which returns
        all relevant intervals for sensor display.

        Args:
            home_id: Home ID (currently unused, Pool knows its home).
            include_tomorrow: If True, request tomorrow's data too. If False,
                              only request up to end of today. This prevents
                              API spam before 13:00 when Tibber doesn't have
                              tomorrow data yet.

        Returns:
            Tuple of (intervals, api_called):
            - intervals: List of price interval dicts.
            - api_called: True if API was called to fetch missing data.

        """
        # user_data is guaranteed by fetch_home_data(), but needed for type narrowing
        if self._cached_user_data is None:
            return [], False  # No data, no API call

        self._log(
            "debug",
            "Retrieving price data for home %s via interval pool (include_tomorrow=%s)",
            home_id,
            include_tomorrow,
        )
        intervals, api_called = await self._interval_pool.get_sensor_data(
            api_client=self.api,
            user_data=self._cached_user_data,
            include_tomorrow=include_tomorrow,
        )

        return intervals, api_called

    def _get_currency_for_home(self, home_id: str) -> str:
        """
        Get currency for a specific home from cached user_data.

        Note: The cached user_data is validated before storage, so if we have
        cached data it should contain valid currency. This method extracts
        the currency from the nested structure.

        Returns:
            Currency code (e.g., "EUR", "NOK", "SEK").

        Raises:
            TibberPricesApiClientError: If currency cannot be determined.

        """
        if not self._cached_user_data:
            msg = "No user data cached - cannot determine currency"
            self._log("error", msg)
            raise TibberPricesApiClientError(msg)

        viewer = self._cached_user_data.get("viewer", {})
        homes = viewer.get("homes", [])

        for home in homes:
            if home.get("id") == home_id:
                # Extract currency from nested structure
                # Use 'or {}' to handle None values (homes without active subscription)
                subscription = home.get("currentSubscription") or {}
                price_info = subscription.get("priceInfo") or {}
                current = price_info.get("current") or {}
                currency = current.get("currency")

                if not currency:
                    # This should not happen if validation worked correctly
                    msg = f"Home {home_id} has no active subscription - currency unavailable"
                    self._log("error", msg)
                    raise TibberPricesApiClientError(msg)

                self._log("debug", "Extracted currency %s for home %s", currency, home_id)
                return currency

        # Home not found in cached data - data validation should have caught this
        msg = f"Home {home_id} not found in user data - data validation failed"
        self._log("error", msg)
        raise TibberPricesApiClientError(msg)

    def _check_home_exists(self, home_id: str) -> bool:
        """
        Check if a home ID exists in cached user data.

        Args:
            home_id: The home ID to check.

        Returns:
            True if home exists, False otherwise.

        """
        if not self._cached_user_data:
            # No user data yet - assume home exists (will be checked on next update)
            return True

        viewer = self._cached_user_data.get("viewer", {})
        homes = viewer.get("homes", [])

        return any(home.get("id") == home_id for home in homes)

    async def handle_main_entry_update(
        self,
        current_time: datetime,
        home_id: str,
        transform_fn: Callable[[dict[str, Any]], dict[str, Any]],
        *,
        current_price_info: list[dict[str, Any]] | None = None,
    ) -> tuple[dict[str, Any], bool]:
        """
        Handle update for main entry - fetch data for this home.

        The IntervalPool is the single source of truth for price data:
        - It handles API fetching, deduplication, and caching internally
        - We decide WHEN to fetch tomorrow data (after 13:00, if not already present)
        - This prevents API spam before 13:00 when Tibber doesn't have tomorrow data

        This method:
        1. Updates user data if needed (daily)
        2. Determines if tomorrow data should be requested
        3. Fetches price data via IntervalPool
        4. Transforms result for coordinator

        Args:
            current_time: Current time for update decisions.
            home_id: Home ID to fetch data for.
            transform_fn: Function to transform raw data for coordinator.
            current_price_info: Current price intervals (from coordinator.data["priceInfo"]).
                Used to check if tomorrow data already exists.

        Returns:
            Tuple of (transformed_data, api_called):
            - transformed_data: Transformed data dict for coordinator.
            - api_called: True if API was called to fetch missing data.

        """
        # Update user data if needed (daily check)
        user_data_updated = await self.update_user_data_if_needed(current_time)

        # Check if this home still exists in user data after update
        # This detects when a home was removed from the Tibber account
        home_exists = self._check_home_exists(home_id)
        if not home_exists:
            self._log("warning", "Home ID %s not found in Tibber account", home_id)
            # Return a special marker in the result that coordinator can check
            result = transform_fn({})
            result["_home_not_found"] = True  # Special marker for coordinator
            return result, False  # No API call made (home doesn't exist)

        # Determine if we should request tomorrow data
        include_tomorrow = self.should_fetch_tomorrow_data(current_price_info)

        # Fetch price data via IntervalPool
        self._log(
            "debug",
            "Fetching price data for home %s via interval pool (include_tomorrow=%s)",
            home_id,
            include_tomorrow,
        )
        raw_data, api_called = await self.fetch_home_data(home_id, current_time, include_tomorrow=include_tomorrow)

        # Parse timestamps immediately after fetch
        raw_data = helpers.parse_all_timestamps(raw_data, time=self.time)

        # Store user data cache (price data persisted by IntervalPool)
        if user_data_updated:
            await self.store_cache()

        # Transform for main entry
        return transform_fn(raw_data), api_called

    async def handle_api_error(
        self,
        error: Exception,
    ) -> None:
        """
        Handle API errors - re-raise appropriate exceptions.

        Note: With IntervalPool as source of truth, there's no local price cache
        to fall back to. The Pool has its own persistence, so on next update
        it will use its cached intervals if API is unavailable.
        """
        if isinstance(error, TibberPricesApiClientAuthenticationError):
            msg = "Invalid access token"
            raise ConfigEntryAuthFailed(msg) from error

        msg = f"Error communicating with API: {error}"
        raise UpdateFailed(msg) from error

    @property
    def cached_user_data(self) -> dict[str, Any] | None:
        """Get cached user data."""
        return self._cached_user_data

    def has_tomorrow_data(self, price_info: list[dict[str, Any]]) -> bool:
        """
        Check if tomorrow's price data is available.

        Args:
            price_info: List of price intervals from coordinator data.

        Returns:
            True if at least one interval from tomorrow is present.

        """
        if not price_info:
            return False

        # Get tomorrow's date
        now = self.time.now()
        tomorrow = (self.time.as_local(now) + timedelta(days=1)).date()

        # Check if any interval is from tomorrow
        for interval in price_info:
            if "startsAt" not in interval:
                continue

            # startsAt is already a datetime object after _transform_data()
            interval_time = interval["startsAt"]
            if isinstance(interval_time, str):
                # Fallback: parse if still string (shouldn't happen with transformed data)
                interval_time = self.time.parse_datetime(interval_time)

            if interval_time and self.time.as_local(interval_time).date() == tomorrow:
                return True

        return False
|
|
||||||
|
|
@ -1,228 +0,0 @@
|
||||||
"""
|
|
||||||
Repair issue management for Tibber Prices integration.
|
|
||||||
|
|
||||||
This module handles creation and cleanup of repair issues that notify users
|
|
||||||
about problems requiring attention in the Home Assistant UI.
|
|
||||||
|
|
||||||
Repair Types:
|
|
||||||
1. Tomorrow Data Missing - Warns when tomorrow's price data is unavailable after 18:00
|
|
||||||
2. Persistent Rate Limits - Warns when API rate limiting persists after multiple errors
|
|
||||||
3. Home Not Found - Warns when a home no longer exists in the Tibber account
|
|
||||||
"""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
import logging
|
|
||||||
from typing import TYPE_CHECKING
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import DOMAIN
|
|
||||||
from homeassistant.helpers import issue_registry as ir
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
from datetime import datetime
|
|
||||||
|
|
||||||
from homeassistant.core import HomeAssistant
|
|
||||||
|
|
||||||
_LOGGER = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
# Repair issue tracking thresholds
|
|
||||||
TOMORROW_DATA_WARNING_HOUR = 18 # Warn after 18:00 if tomorrow data missing
|
|
||||||
RATE_LIMIT_WARNING_THRESHOLD = 3 # Warn after 3 consecutive rate limit errors
|
|
||||||
|
|
||||||
|
|
||||||
class TibberPricesRepairManager:
    """Manage repair issues for Tibber Prices integration."""

    def __init__(self, hass: HomeAssistant, entry_id: str, home_name: str) -> None:
        """
        Initialize repair manager.

        Args:
            hass: Home Assistant instance
            entry_id: Config entry ID for this home
            home_name: Display name of the home (for user-friendly messages)

        """
        self._hass = hass
        self._entry_id = entry_id
        self._home_name = home_name

        # Track consecutive rate limit errors
        self._rate_limit_error_count = 0

        # Track if repairs are currently active (avoids redundant registry calls)
        self._tomorrow_data_repair_active = False
        self._rate_limit_repair_active = False
        self._home_not_found_repair_active = False

    async def check_tomorrow_data_availability(
        self,
        has_tomorrow_data: bool,  # noqa: FBT001 - Clear meaning in context
        current_time: datetime,
    ) -> None:
        """
        Check if tomorrow data is available and create/clear repair as needed.

        Creates repair if:
        - Current hour >= 18:00 (after expected data availability)
        - Tomorrow's data is missing

        Clears repair if:
        - Tomorrow's data is now available

        Args:
            has_tomorrow_data: Whether tomorrow's data is available
            current_time: Current local datetime for hour check

        """
        should_warn = current_time.hour >= TOMORROW_DATA_WARNING_HOUR and not has_tomorrow_data

        if should_warn and not self._tomorrow_data_repair_active:
            await self._create_tomorrow_data_repair()
        elif not should_warn and self._tomorrow_data_repair_active:
            await self._clear_tomorrow_data_repair()

    async def track_rate_limit_error(self) -> None:
        """
        Track rate limit error and create repair if threshold exceeded.

        Increments rate limit error counter and creates repair issue
        if threshold (3 consecutive errors) is reached.
        """
        self._rate_limit_error_count += 1

        if self._rate_limit_error_count >= RATE_LIMIT_WARNING_THRESHOLD and not self._rate_limit_repair_active:
            await self._create_rate_limit_repair()

    async def clear_rate_limit_tracking(self) -> None:
        """
        Clear rate limit error tracking after successful API call.

        Resets counter and clears any active repair issue.
        """
        # Reset the counter directly to 0. A previous implementation used
        # min(count, 0), which only yields 0 because the counter can never go
        # negative - a plain reset states the intent unambiguously.
        self._rate_limit_error_count = 0

        if self._rate_limit_repair_active:
            await self._clear_rate_limit_repair()

    async def create_home_not_found_repair(self) -> None:
        """
        Create repair for home no longer found in Tibber account.

        This indicates the home was deleted from the user's Tibber account
        but the config entry still exists in Home Assistant.
        """
        if self._home_not_found_repair_active:
            return

        _LOGGER.warning(
            "Home '%s' not found in Tibber account - creating repair issue",
            self._home_name,
        )

        ir.async_create_issue(
            self._hass,
            DOMAIN,
            f"home_not_found_{self._entry_id}",
            is_fixable=True,
            severity=ir.IssueSeverity.ERROR,
            translation_key="home_not_found",
            translation_placeholders={
                "home_name": self._home_name,
                "entry_id": self._entry_id,
            },
        )
        self._home_not_found_repair_active = True

    async def clear_home_not_found_repair(self) -> None:
        """Clear home not found repair (home is available again or entry removed)."""
        if not self._home_not_found_repair_active:
            return

        _LOGGER.debug("Clearing home not found repair for '%s'", self._home_name)

        ir.async_delete_issue(
            self._hass,
            DOMAIN,
            f"home_not_found_{self._entry_id}",
        )
        self._home_not_found_repair_active = False

    async def clear_all_repairs(self) -> None:
        """
        Clear all active repair issues.

        Called during coordinator shutdown or entry removal.
        """
        if self._tomorrow_data_repair_active:
            await self._clear_tomorrow_data_repair()
        if self._rate_limit_repair_active:
            await self._clear_rate_limit_repair()
        if self._home_not_found_repair_active:
            await self.clear_home_not_found_repair()

    async def _create_tomorrow_data_repair(self) -> None:
        """Create repair issue for missing tomorrow data."""
        _LOGGER.warning(
            "Tomorrow's price data missing after %d:00 for home '%s' - creating repair issue",
            TOMORROW_DATA_WARNING_HOUR,
            self._home_name,
        )

        ir.async_create_issue(
            self._hass,
            DOMAIN,
            f"tomorrow_data_missing_{self._entry_id}",
            is_fixable=False,
            severity=ir.IssueSeverity.WARNING,
            translation_key="tomorrow_data_missing",
            translation_placeholders={
                "home_name": self._home_name,
                "warning_hour": str(TOMORROW_DATA_WARNING_HOUR),
            },
        )
        self._tomorrow_data_repair_active = True

    async def _clear_tomorrow_data_repair(self) -> None:
        """Clear tomorrow data repair issue."""
        _LOGGER.debug("Tomorrow's data now available for '%s' - clearing repair issue", self._home_name)

        ir.async_delete_issue(
            self._hass,
            DOMAIN,
            f"tomorrow_data_missing_{self._entry_id}",
        )
        self._tomorrow_data_repair_active = False

    async def _create_rate_limit_repair(self) -> None:
        """Create repair issue for persistent rate limiting."""
        _LOGGER.warning(
            "Persistent API rate limiting detected for home '%s' (%d consecutive errors) - creating repair issue",
            self._home_name,
            self._rate_limit_error_count,
        )

        ir.async_create_issue(
            self._hass,
            DOMAIN,
            f"rate_limit_exceeded_{self._entry_id}",
            is_fixable=False,
            severity=ir.IssueSeverity.WARNING,
            translation_key="rate_limit_exceeded",
            translation_placeholders={
                "home_name": self._home_name,
                "error_count": str(self._rate_limit_error_count),
            },
        )
        self._rate_limit_repair_active = True

    async def _clear_rate_limit_repair(self) -> None:
        """Clear rate limit repair issue."""
        _LOGGER.debug("Rate limiting resolved for '%s' - clearing repair issue", self._home_name)

        ir.async_delete_issue(
            self._hass,
            DOMAIN,
            f"rate_limit_exceeded_{self._entry_id}",
        )
        self._rate_limit_repair_active = False
|
|
||||||
|
|
@ -1,20 +1,7 @@
|
||||||
{
|
{
|
||||||
"apexcharts": {
|
"apexcharts": {
|
||||||
"title_rating_level": "Preisphasen Tagesverlauf",
|
"title_rating_level": "Preisphasen Tagesverlauf",
|
||||||
"title_level": "Preisniveau",
|
"title_level": "Preisniveau"
|
||||||
"hourly_suffix": "(Ø stündlich)",
|
|
||||||
"best_price_period_name": "Bestpreis-Zeitraum",
|
|
||||||
"peak_price_period_name": "Spitzenpreis-Zeitraum",
|
|
||||||
"notification": {
|
|
||||||
"metadata_sensor_unavailable": {
|
|
||||||
"title": "Tibber Prices: ApexCharts YAML mit eingeschränkter Funktionalität generiert",
|
|
||||||
"message": "Du hast gerade eine ApexCharts-Card-Konfiguration über die Entwicklerwerkzeuge generiert. Der Chart-Metadaten-Sensor ist aktuell deaktiviert, daher zeigt das generierte YAML nur **Basisfunktionalität** (Auto-Skalierung, fester Gradient bei 50%).\n\n**Für volle Funktionalität** (optimierte Skalierung, dynamische Verlaufsfarben):\n1. [Tibber Prices Integration öffnen](https://my.home-assistant.io/redirect/integration/?domain=tibber_prices)\n2. Aktiviere den 'Chart Metadata' Sensor\n3. **Generiere das YAML erneut** über die Entwicklerwerkzeuge\n4. **Ersetze den alten YAML-Code** in deinem Dashboard durch die neue Version\n\n⚠️ Nur den Sensor zu aktivieren reicht nicht - du musst das YAML neu generieren und ersetzen!"
|
|
||||||
},
|
|
||||||
"missing_cards": {
|
|
||||||
"title": "Tibber Prices: ApexCharts YAML kann nicht verwendet werden",
|
|
||||||
"message": "Du hast gerade eine ApexCharts-Card-Konfiguration über die Entwicklerwerkzeuge generiert, aber das generierte YAML **funktioniert nicht**, weil erforderliche Custom Cards fehlen.\n\n**Fehlende Cards:**\n{cards}\n\n**Um das generierte YAML zu nutzen:**\n1. Klicke auf die obigen Links, um die fehlenden Cards über HACS zu installieren\n2. Starte Home Assistant neu (manchmal erforderlich)\n3. **Generiere das YAML erneut** über die Entwicklerwerkzeuge\n4. Füge das YAML zu deinem Dashboard hinzu\n\n⚠️ Der aktuelle YAML-Code funktioniert nicht, bis alle Cards installiert sind!"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
"sensor": {
|
"sensor": {
|
||||||
"current_interval_price": {
|
"current_interval_price": {
|
||||||
|
|
@ -22,7 +9,7 @@
|
||||||
"long_description": "Zeigt den aktuellen Preis pro kWh von deinem Tibber-Abonnement an",
|
"long_description": "Zeigt den aktuellen Preis pro kWh von deinem Tibber-Abonnement an",
|
||||||
"usage_tips": "Nutze dies, um Preise zu verfolgen oder Automatisierungen zu erstellen, die bei günstigem Strom ausgeführt werden"
|
"usage_tips": "Nutze dies, um Preise zu verfolgen oder Automatisierungen zu erstellen, die bei günstigem Strom ausgeführt werden"
|
||||||
},
|
},
|
||||||
"current_interval_price_base": {
|
"current_interval_price_major": {
|
||||||
"description": "Aktueller Strompreis in Hauptwährung (EUR/kWh, NOK/kWh, etc.) für Energie-Dashboard",
|
"description": "Aktueller Strompreis in Hauptwährung (EUR/kWh, NOK/kWh, etc.) für Energie-Dashboard",
|
||||||
"long_description": "Zeigt den aktuellen Preis pro kWh in Hauptwährungseinheiten an (z.B. EUR/kWh statt ct/kWh, NOK/kWh statt øre/kWh). Dieser Sensor ist speziell für die Verwendung mit dem Energie-Dashboard von Home Assistant konzipiert, das Preise in Standard-Währungseinheiten benötigt.",
|
"long_description": "Zeigt den aktuellen Preis pro kWh in Hauptwährungseinheiten an (z.B. EUR/kWh statt ct/kWh, NOK/kWh statt øre/kWh). Dieser Sensor ist speziell für die Verwendung mit dem Energie-Dashboard von Home Assistant konzipiert, das Preise in Standard-Währungseinheiten benötigt.",
|
||||||
"usage_tips": "Verwende diesen Sensor beim Konfigurieren des Energie-Dashboards unter Einstellungen → Dashboards → Energie. Wähle diesen Sensor als 'Entität mit dem aktuellen Preis' aus, um deine Energiekosten automatisch zu berechnen. Das Energie-Dashboard multipliziert deinen Energieverbrauch (kWh) mit diesem Preis, um die Gesamtkosten anzuzeigen."
|
"usage_tips": "Verwende diesen Sensor beim Konfigurieren des Energie-Dashboards unter Einstellungen → Dashboards → Energie. Wähle diesen Sensor als 'Entität mit dem aktuellen Preis' aus, um deine Energiekosten automatisch zu berechnen. Das Energie-Dashboard multipliziert deinen Energieverbrauch (kWh) mit diesem Preis, um die Gesamtkosten anzuzeigen."
|
||||||
|
|
@ -58,9 +45,9 @@
|
||||||
"usage_tips": "Nutze dies, um den Betrieb von Geräten während Spitzenpreiszeiten zu vermeiden"
|
"usage_tips": "Nutze dies, um den Betrieb von Geräten während Spitzenpreiszeiten zu vermeiden"
|
||||||
},
|
},
|
||||||
"average_price_today": {
|
"average_price_today": {
|
||||||
"description": "Der typische Strompreis für heute pro kWh (konfigurierbares Anzeigeformat)",
|
"description": "Der durchschnittliche Strompreis für heute pro kWh",
|
||||||
"long_description": "Zeigt den typischen Preis pro kWh für heute. **Standardmäßig zeigt der Status den Median** (resistent gegen extreme Preisspitzen, zeigt was du generell erwarten kannst). Du kannst dies in den Integrationsoptionen ändern, um stattdessen das arithmetische Mittel anzuzeigen. Der alternative Wert ist immer als Attribut `price_mean` oder `price_median` für Automatisierungen verfügbar.",
|
"long_description": "Zeigt den durchschnittlichen Preis pro kWh für den aktuellen Tag von deinem Tibber-Abonnement an",
|
||||||
"usage_tips": "Nutze den Status-Wert für die Anzeige. Für exakte Kostenberechnungen in Automatisierungen nutze: {{ state_attr('sensor.average_price_today', 'price_mean') }}"
|
"usage_tips": "Nutze dies als Grundlage für den Vergleich mit aktuellen Preisen"
|
||||||
},
|
},
|
||||||
"lowest_price_tomorrow": {
|
"lowest_price_tomorrow": {
|
||||||
"description": "Der niedrigste Strompreis für morgen pro kWh",
|
"description": "Der niedrigste Strompreis für morgen pro kWh",
|
||||||
|
|
@ -73,9 +60,9 @@
|
||||||
"usage_tips": "Nutze dies, um den Betrieb von Geräten während der teuersten Stunden morgen zu vermeiden. Plane nicht-essentielle Lasten außerhalb dieser Spitzenpreiszeiten."
|
"usage_tips": "Nutze dies, um den Betrieb von Geräten während der teuersten Stunden morgen zu vermeiden. Plane nicht-essentielle Lasten außerhalb dieser Spitzenpreiszeiten."
|
||||||
},
|
},
|
||||||
"average_price_tomorrow": {
|
"average_price_tomorrow": {
|
||||||
"description": "Der typische Strompreis für morgen pro kWh (konfigurierbares Anzeigeformat)",
|
"description": "Der durchschnittliche Strompreis für morgen pro kWh",
|
||||||
"long_description": "Zeigt den typischen Preis pro kWh für morgen. **Standardmäßig zeigt der Status den Median** (resistent gegen extreme Preisspitzen). Du kannst dies in den Integrationsoptionen ändern, um stattdessen das arithmetische Mittel anzuzeigen. Der alternative Wert ist als Attribut verfügbar. Dieser Sensor wird nicht verfügbar, bis die Preise für morgen von Tibber veröffentlicht werden (typischerweise zwischen 13:00 und 14:00 Uhr MEZ).",
|
"long_description": "Zeigt den durchschnittlichen Preis pro kWh für den morgigen Tag von deinem Tibber-Abonnement an. Dieser Sensor wird nicht verfügbar, bis die Preise für morgen von Tibber veröffentlicht werden (typischerweise zwischen 13:00 und 14:00 Uhr MEZ).",
|
||||||
"usage_tips": "Nutze den Status-Wert für Anzeige und schnelle Vergleiche. Für Automatisierungen, die exakte Kostenberechnungen benötigen, nutze das Attribut `price_mean`: {{ state_attr('sensor.average_price_tomorrow', 'price_mean') }}"
|
"usage_tips": "Nutze dies als Grundlinie für den Vergleich mit den morgigen Preisen und zur Verbrauchsplanung. Vergleiche mit dem heutigen Durchschnitt, um zu sehen, ob morgen insgesamt teurer oder günstiger wird."
|
||||||
},
|
},
|
||||||
"yesterday_price_level": {
|
"yesterday_price_level": {
|
||||||
"description": "Aggregiertes Preisniveau für gestern",
|
"description": "Aggregiertes Preisniveau für gestern",
|
||||||
|
|
@ -108,14 +95,14 @@
|
||||||
"usage_tips": "Nutze dies, um den morgigen Energieverbrauch basierend auf deinen persönlichen Preisschwellenwerten zu planen. Vergleiche mit heute, um zu entscheiden, ob du den Verbrauch auf morgen verschieben oder heute nutzen solltest."
|
"usage_tips": "Nutze dies, um den morgigen Energieverbrauch basierend auf deinen persönlichen Preisschwellenwerten zu planen. Vergleiche mit heute, um zu entscheiden, ob du den Verbrauch auf morgen verschieben oder heute nutzen solltest."
|
||||||
},
|
},
|
||||||
"trailing_price_average": {
|
"trailing_price_average": {
|
||||||
"description": "Der typische Strompreis der letzten 24 Stunden pro kWh (konfigurierbares Anzeigeformat)",
|
"description": "Der durchschnittliche Strompreis für die letzten 24 Stunden pro kWh",
|
||||||
"long_description": "Zeigt den typischen Preis pro kWh der letzten 24 Stunden. **Standardmäßig zeigt der Status den Median** (resistent gegen extreme Spitzen, zeigt welches Preisniveau typisch war). Du kannst dies in den Integrationsoptionen ändern, um stattdessen das arithmetische Mittel anzuzeigen. Der alternative Wert ist als Attribut verfügbar. Wird alle 15 Minuten aktualisiert.",
|
"long_description": "Zeigt den durchschnittlichen Preis pro kWh berechnet aus den letzten 24 Stunden (nachlaufender Durchschnitt) von deinem Tibber-Abonnement an. Dies bietet einen gleitenden Durchschnitt, der alle 15 Minuten basierend auf historischen Daten aktualisiert wird.",
|
||||||
"usage_tips": "Nutze den Status-Wert, um das typische aktuelle Preisniveau zu sehen. Für Kostenberechnungen nutze: {{ state_attr('sensor.trailing_price_average', 'price_mean') }}"
|
"usage_tips": "Nutze dies, um aktuelle Preise mit den jüngsten Trends zu vergleichen. Ein aktueller Preis deutlich über diesem Durchschnitt kann ein guter Zeitpunkt sein, um den Verbrauch zu reduzieren."
|
||||||
},
|
},
|
||||||
"leading_price_average": {
|
"leading_price_average": {
|
||||||
"description": "Der typische Strompreis für die nächsten 24 Stunden pro kWh (konfigurierbares Anzeigeformat)",
|
"description": "Der durchschnittliche Strompreis für die nächsten 24 Stunden pro kWh",
|
||||||
"long_description": "Zeigt den typischen Preis pro kWh für die nächsten 24 Stunden. **Standardmäßig zeigt der Status den Median** (resistent gegen extreme Spitzen, zeigt welches Preisniveau zu erwarten ist). Du kannst dies in den Integrationsoptionen ändern, um stattdessen das arithmetische Mittel anzuzeigen. Der alternative Wert ist als Attribut verfügbar.",
|
"long_description": "Zeigt den durchschnittlichen Preis pro kWh berechnet aus den nächsten 24 Stunden (vorlaufender Durchschnitt) von deinem Tibber-Abonnement an. Dies bietet einen vorausschauenden Durchschnitt basierend auf verfügbaren Prognosedaten.",
|
||||||
"usage_tips": "Nutze den Status-Wert, um das typische kommende Preisniveau zu sehen. Für Kostenberechnungen nutze: {{ state_attr('sensor.leading_price_average', 'price_mean') }}"
|
"usage_tips": "Nutze dies zur Energieverbrauchsplanung. Wenn der aktuelle Preis unter dem vorlaufenden Durchschnitt liegt, kann es ein guter Zeitpunkt sein, um energieintensive Geräte zu betreiben."
|
||||||
},
|
},
|
||||||
"trailing_price_min": {
|
"trailing_price_min": {
|
||||||
"description": "Der niedrigste Strompreis für die letzten 24 Stunden pro kWh",
|
"description": "Der niedrigste Strompreis für die letzten 24 Stunden pro kWh",
|
||||||
|
|
@ -289,27 +276,27 @@
|
||||||
},
|
},
|
||||||
"data_timestamp": {
|
"data_timestamp": {
|
||||||
"description": "Zeitstempel des letzten verfügbaren Preisintervalls",
|
"description": "Zeitstempel des letzten verfügbaren Preisintervalls",
|
||||||
"long_description": "Zeigt den Zeitstempel des letzten verfügbaren Preisdatenintervalls von deinem Tibber-Abonnement"
|
"long_description": "Zeigt den Zeitstempel des letzten verfügbaren Preisdatenintervalls von Ihrem Tibber-Abonnement"
|
||||||
},
|
},
|
||||||
"today_volatility": {
|
"today_volatility": {
|
||||||
"description": "Wie stark sich die Strompreise heute verändern",
|
"description": "Preisvolatilitätsklassifizierung für heute",
|
||||||
"long_description": "Zeigt, ob die heutigen Preise stabil bleiben oder stark schwanken. Niedrige Volatilität bedeutet recht konstante Preise – Timing ist kaum wichtig. Hohe Volatilität bedeutet spürbare Preisunterschiede über den Tag – gute Chance, den Verbrauch auf günstigere Zeiten zu verschieben. `price_coefficient_variation_%` zeigt den Prozentwert, `price_spread` die absolute Preisspanne.",
|
"long_description": "Zeigt, wie stark die Strompreise im Laufe des heutigen Tages variieren, basierend auf der Spannweite (Differenz zwischen höchstem und niedrigstem Preis). Klassifizierung: NIEDRIG = Spannweite < 5ct, MODERAT = 5-15ct, HOCH = 15-30ct, SEHR HOCH = >30ct.",
|
||||||
"usage_tips": "Nutze dies, um zu entscheiden, ob Optimierung sich lohnt. Bei niedriger Volatilität kannst du Geräte jederzeit laufen lassen. Bei hoher Volatilität sparst du spürbar, wenn du Best-Price-Perioden nutzt."
|
"usage_tips": "Verwenden Sie dies, um zu entscheiden, ob preisbasierte Optimierung lohnenswert ist. Zum Beispiel lohnt sich bei einer Balkonbatterie mit 15% Effizienzverlusten die Optimierung nur, wenn die Volatilität mindestens MODERAT ist. Erstellen Sie Automatisierungen, die die Volatilität prüfen, bevor Lade-/Entladezyklen geplant werden."
|
||||||
},
|
},
|
||||||
"tomorrow_volatility": {
|
"tomorrow_volatility": {
|
||||||
"description": "Wie stark sich die Strompreise morgen verändern werden",
|
"description": "Preisvolatilitätsklassifizierung für morgen",
|
||||||
"long_description": "Zeigt, ob die Preise morgen stabil bleiben oder stark schwanken. Verfügbar, sobald die morgigen Daten veröffentlicht sind (typischerweise 13:00–14:00 MEZ). Niedrige Volatilität bedeutet recht konstante Preise – Timing ist nicht kritisch. Hohe Volatilität bedeutet deutliche Preisunterschiede über den Tag – gute Gelegenheit, energieintensive Aufgaben zu planen. `price_coefficient_variation_%` zeigt den Prozentwert, `price_spread` die absolute Preisspanne.",
|
"long_description": "Zeigt, wie stark die Strompreise im Laufe des morgigen Tages variieren werden, basierend auf der Spannweite (Differenz zwischen höchstem und niedrigstem Preis). Wird nicht verfügbar, bis morgige Daten veröffentlicht sind (typischerweise 13:00-14:00 MEZ).",
|
||||||
"usage_tips": "Nutze dies für die Planung des morgigen Energieverbrauchs. Hohe Volatilität? Plane flexible Lasten in Best-Price-Perioden. Niedrige Volatilität? Lass Geräte laufen, wann es dir passt."
|
"usage_tips": "Verwenden Sie dies zur Vorausplanung des morgigen Energieverbrauchs. Bei HOHER oder SEHR HOHER Volatilität morgen lohnt sich die Optimierung des Energieverbrauchs. Bei NIEDRIGER Volatilität können Sie Geräte jederzeit ohne wesentliche Kostenunterschiede betreiben."
|
||||||
},
|
},
|
||||||
"next_24h_volatility": {
|
"next_24h_volatility": {
|
||||||
"description": "Wie stark sich die Preise in den nächsten 24 Stunden verändern",
|
"description": "Preisvolatilitätsklassifizierung für die rollierenden nächsten 24 Stunden",
|
||||||
"long_description": "Zeigt die Preisvolatilität für ein rollierendes 24-Stunden-Fenster ab jetzt (aktualisiert alle 15 Minuten). Niedrige Volatilität bedeutet recht konstante Preise. Hohe Volatilität bedeutet spürbare Preisschwankungen und damit Chancen zur Optimierung. Im Unterschied zu Heute/Morgen-Sensoren überschreitet dieser Tagesgrenzen und liefert eine durchgängige Vorhersage. `price_coefficient_variation_%` zeigt den Prozentwert, `price_spread` die absolute Preisspanne.",
|
"long_description": "Zeigt, wie stark die Strompreise in den nächsten 24 Stunden ab jetzt variieren (rollierendes Fenster). Dies überschreitet Tagesgrenzen und aktualisiert sich alle 15 Minuten, wodurch eine vorausschauende Volatilitätsbewertung unabhängig von Kalendertagen bereitgestellt wird.",
|
||||||
"usage_tips": "Am besten für Entscheidungen in Echtzeit. Nutze dies für Batterieladestrategien oder andere flexible Lasten, die über Mitternacht laufen könnten. Bietet eine konsistente 24h-Perspektive unabhängig vom Kalendertag."
|
"usage_tips": "Bester Sensor für Echtzeitoptimierungsentscheidungen. Im Gegensatz zu Heute/Morgen-Sensoren, die um Mitternacht wechseln, bietet dies eine kontinuierliche 24h-Volatilitätsbewertung. Verwenden Sie dies für Batterielade-Strategien, die Tagesgrenzen überschreiten."
|
||||||
},
|
},
|
||||||
"today_tomorrow_volatility": {
|
"today_tomorrow_volatility": {
|
||||||
"description": "Kombinierte Preisvolatilität für heute und morgen",
|
"description": "Kombinierte Preisvolatilitätsklassifizierung für heute und morgen",
|
||||||
"long_description": "Zeigt die Gesamtvolatilität, wenn heute und morgen gemeinsam betrachtet werden (sobald die morgigen Daten verfügbar sind). Zeigt, ob über die Tagesgrenze hinweg deutliche Preisunterschiede bestehen. Fällt auf nur-heute zurück, wenn morgige Daten noch fehlen. Hilfreich für mehrtägige Optimierung. `price_coefficient_variation_%` zeigt den Prozentwert, `price_spread` die absolute Preisspanne.",
|
"long_description": "Zeigt die Volatilität über heute und morgen zusammen (wenn morgige Daten verfügbar sind). Bietet eine erweiterte Ansicht der Preisvariation über bis zu 48 Stunden. Fällt auf Nur-Heute zurück, wenn morgige Daten noch nicht verfügbar sind.",
|
||||||
"usage_tips": "Nutze dies für Aufgaben, die sich über mehrere Tage erstrecken. Prüfe, ob die Preisunterschiede groß genug für eine Planung sind. Die einzelnen Tages-Sensoren zeigen die Beiträge pro Tag, falls du mehr Details brauchst."
|
"usage_tips": "Verwenden Sie dies für Mehrtagsplanung und um zu verstehen, ob Preismöglichkeiten über die Tagesgrenze hinweg bestehen. Die Attribute 'today_volatility' und 'tomorrow_volatility' zeigen individuelle Tagesbeiträge. Nützlich für die Planung von Ladesitzungen, die Mitternacht überschreiten könnten."
|
||||||
},
|
},
|
||||||
"data_lifecycle_status": {
|
"data_lifecycle_status": {
|
||||||
"description": "Aktueller Status des Preisdaten-Lebenszyklus und der Zwischenspeicherung",
|
"description": "Aktueller Status des Preisdaten-Lebenszyklus und der Zwischenspeicherung",
|
||||||
|
|
@ -322,14 +309,14 @@
|
||||||
"usage_tips": "Nutze dies, um einen Countdown wie 'Günstiger Zeitraum endet in 2 Stunden' (wenn aktiv) oder 'Nächster günstiger Zeitraum endet um 14:00' (wenn inaktiv) anzuzeigen. Home Assistant zeigt automatisch relative Zeit für Zeitstempel-Sensoren an."
|
"usage_tips": "Nutze dies, um einen Countdown wie 'Günstiger Zeitraum endet in 2 Stunden' (wenn aktiv) oder 'Nächster günstiger Zeitraum endet um 14:00' (wenn inaktiv) anzuzeigen. Home Assistant zeigt automatisch relative Zeit für Zeitstempel-Sensoren an."
|
||||||
},
|
},
|
||||||
"best_price_period_duration": {
|
"best_price_period_duration": {
|
||||||
"description": "Gesamtlänge des aktuellen oder nächsten günstigen Zeitraums",
|
"description": "Gesamtlänge des aktuellen oder nächsten günstigen Zeitraums in Minuten",
|
||||||
"long_description": "Zeigt, wie lange der günstige Zeitraum insgesamt dauert. Der State wird in Stunden angezeigt (z. B. 1,5 h) für eine einfache Lesbarkeit in der UI, während das Attribut `period_duration_minutes` denselben Wert in Minuten bereitstellt (z. B. 90) für Automationen. Während eines aktiven Zeitraums zeigt dies die Dauer des aktuellen Zeitraums. Wenn kein Zeitraum aktiv ist, zeigt dies die Dauer des nächsten kommenden Zeitraums. Gibt nur 'Unbekannt' zurück, wenn keine Zeiträume ermittelt wurden.",
|
"long_description": "Zeigt, wie lange der günstige Zeitraum insgesamt dauert. Während eines aktiven Zeitraums zeigt dies die Dauer des aktuellen Zeitraums. Wenn kein Zeitraum aktiv ist, zeigt dies die Dauer des nächsten kommenden Zeitraums. Gibt nur 'Unbekannt' zurück, wenn keine Zeiträume ermittelt wurden.",
|
||||||
"usage_tips": "Für Anzeige: State-Wert (Stunden) in Dashboards nutzen. Für Automationen: Attribut `period_duration_minutes` verwenden, um zu prüfen, ob genug Zeit für langläufige Geräte ist (z. B. 'Wenn period_duration_minutes >= 90, starte Waschmaschine')."
|
"usage_tips": "Nützlich für Planung: 'Der nächste günstige Zeitraum dauert 90 Minuten' oder 'Der aktuelle günstige Zeitraum ist 120 Minuten lang'. Kombiniere mit remaining_minutes, um zu berechnen, wann langlaufende Geräte gestartet werden sollten."
|
||||||
},
|
},
|
||||||
"best_price_remaining_minutes": {
|
"best_price_remaining_minutes": {
|
||||||
"description": "Verbleibende Zeit im aktuellen günstigen Zeitraum",
|
"description": "Verbleibende Minuten im aktuellen günstigen Zeitraum (0 wenn inaktiv)",
|
||||||
"long_description": "Zeigt, wie viel Zeit im aktuellen günstigen Zeitraum noch verbleibt. Der State wird in Stunden angezeigt (z. B. 0,5 h) für eine einfache Lesbarkeit, während das Attribut `remaining_minutes` Minuten bereitstellt (z. B. 30) für Automationslogik. Gibt 0 zurück, wenn kein Zeitraum aktiv ist. Aktualisiert sich jede Minute. Prüfe binary_sensor.best_price_period, um zu sehen, ob ein Zeitraum aktuell aktiv ist.",
|
"long_description": "Zeigt, wie viele Minuten im aktuellen günstigen Zeitraum noch verbleiben. Gibt 0 zurück, wenn kein Zeitraum aktiv ist. Aktualisiert sich jede Minute. Prüfe binary_sensor.best_price_period, um zu sehen, ob ein Zeitraum aktuell aktiv ist.",
|
||||||
"usage_tips": "Für Automationen: Attribut `remaining_minutes` mit numerischen Vergleichen nutzen wie 'Wenn remaining_minutes > 0 UND remaining_minutes < 30, starte Waschmaschine jetzt'. Der Wert 0 macht es einfach zu prüfen, ob ein Zeitraum aktiv ist (Wert > 0) oder nicht (Wert = 0)."
|
"usage_tips": "Perfekt für Automatisierungen: 'Wenn remaining_minutes > 0 UND remaining_minutes < 30, starte Waschmaschine jetzt'. Der Wert 0 macht es einfach zu prüfen, ob ein Zeitraum aktiv ist (Wert > 0) oder nicht (Wert = 0)."
|
||||||
},
|
},
|
||||||
"best_price_progress": {
|
"best_price_progress": {
|
||||||
"description": "Fortschritt durch aktuellen günstigen Zeitraum (0% wenn inaktiv)",
|
"description": "Fortschritt durch aktuellen günstigen Zeitraum (0% wenn inaktiv)",
|
||||||
|
|
@ -342,9 +329,9 @@
|
||||||
"usage_tips": "Immer nützlich für Vorausplanung: 'Nächster günstiger Zeitraum startet in 3 Stunden' (egal, ob du gerade in einem Zeitraum bist oder nicht). Kombiniere mit Automatisierungen: 'Wenn nächste Startzeit in 10 Minuten ist, sende Benachrichtigung zur Vorbereitung der Waschmaschine'."
|
"usage_tips": "Immer nützlich für Vorausplanung: 'Nächster günstiger Zeitraum startet in 3 Stunden' (egal, ob du gerade in einem Zeitraum bist oder nicht). Kombiniere mit Automatisierungen: 'Wenn nächste Startzeit in 10 Minuten ist, sende Benachrichtigung zur Vorbereitung der Waschmaschine'."
|
||||||
},
|
},
|
||||||
"best_price_next_in_minutes": {
|
"best_price_next_in_minutes": {
|
||||||
"description": "Zeit bis zum nächsten günstigen Zeitraum",
|
"description": "Minuten bis nächster günstiger Zeitraum startet (0 beim Übergang)",
|
||||||
"long_description": "Zeigt, wie lange es bis zum nächsten günstigen Zeitraum dauert. Der State wird in Stunden angezeigt (z. B. 2,25 h) für Dashboards, während das Attribut `next_in_minutes` Minuten bereitstellt (z. B. 135) für Automationsbedingungen. Während eines aktiven Zeitraums zeigt dies die Zeit bis zum Zeitraum nach dem aktuellen. Gibt 0 während kurzer Übergangsphasen zurück. Aktualisiert sich jede Minute.",
|
"long_description": "Zeigt Minuten bis der nächste günstige Zeitraum startet. Während eines aktiven Zeitraums zeigt dies die Zeit bis zum Zeitraum nach dem aktuellen. Gibt 0 während kurzer Übergangsphasen zurück. Aktualisiert sich jede Minute.",
|
||||||
"usage_tips": "Für Automationen: Attribut `next_in_minutes` nutzen wie 'Wenn next_in_minutes > 0 UND next_in_minutes < 15, warte, bevor du die Geschirrspülmaschine startest'. Wert > 0 zeigt immer an, dass ein zukünftiger Zeitraum geplant ist."
|
"usage_tips": "Perfekt für 'warte bis günstiger Zeitraum' Automatisierungen: 'Wenn next_in_minutes > 0 UND next_in_minutes < 15, warte, bevor du die Geschirrspülmaschine startest'. Wert > 0 zeigt immer an, dass ein zukünftiger Zeitraum geplant ist."
|
||||||
},
|
},
|
||||||
"peak_price_end_time": {
|
"peak_price_end_time": {
|
||||||
"description": "Wann der aktuelle oder nächste teure Zeitraum endet",
|
"description": "Wann der aktuelle oder nächste teure Zeitraum endet",
|
||||||
|
|
@ -352,14 +339,14 @@
|
||||||
"usage_tips": "Nutze dies, um 'Teurer Zeitraum endet in 1 Stunde' (wenn aktiv) oder 'Nächster teurer Zeitraum endet um 18:00' (wenn inaktiv) anzuzeigen. Kombiniere mit Automatisierungen, um den Betrieb nach der Spitzenzeit fortzusetzen."
|
"usage_tips": "Nutze dies, um 'Teurer Zeitraum endet in 1 Stunde' (wenn aktiv) oder 'Nächster teurer Zeitraum endet um 18:00' (wenn inaktiv) anzuzeigen. Kombiniere mit Automatisierungen, um den Betrieb nach der Spitzenzeit fortzusetzen."
|
||||||
},
|
},
|
||||||
"peak_price_period_duration": {
|
"peak_price_period_duration": {
|
||||||
"description": "Länge des aktuellen/nächsten teuren Zeitraums",
|
"description": "Gesamtlänge des aktuellen oder nächsten teuren Zeitraums in Minuten",
|
||||||
"long_description": "Gesamtdauer des aktuellen oder nächsten teuren Zeitraums. Der State wird in Stunden angezeigt (z. B. 1,5 h) für leichtes Ablesen in der UI, während das Attribut `period_duration_minutes` denselben Wert in Minuten bereitstellt (z. B. 90) für Automationen. Dieser Wert repräsentiert die **volle geplante Dauer** des Zeitraums und ist konstant während des gesamten Zeitraums, auch wenn die verbleibende Zeit (remaining_minutes) abnimmt.",
|
"long_description": "Zeigt, wie lange der teure Zeitraum insgesamt dauert. Während eines aktiven Zeitraums zeigt dies die Dauer des aktuellen Zeitraums. Wenn kein Zeitraum aktiv ist, zeigt dies die Dauer des nächsten kommenden Zeitraums. Gibt nur 'Unbekannt' zurück, wenn keine Zeiträume ermittelt wurden.",
|
||||||
"usage_tips": "Kombiniere mit remaining_minutes, um zu berechnen, wann langlaufende Geräte gestoppt werden sollen: Zeitraum begann vor `period_duration_minutes - remaining_minutes` Minuten. Dieses Attribut unterstützt Energiespar-Strategien, indem es hilft, Hochverbrauchsaktivitäten außerhalb teurer Perioden zu planen."
|
"usage_tips": "Nützlich für Planung: 'Der nächste teure Zeitraum dauert 60 Minuten' oder 'Der aktuelle Spitzenzeitraum ist 90 Minuten lang'. Kombiniere mit remaining_minutes, um zu entscheiden, ob die Spitze abgewartet oder der Betrieb fortgesetzt werden soll."
|
||||||
},
|
},
|
||||||
"peak_price_remaining_minutes": {
|
"peak_price_remaining_minutes": {
|
||||||
"description": "Verbleibende Zeit im aktuellen teuren Zeitraum",
|
"description": "Verbleibende Minuten im aktuellen teuren Zeitraum (0 wenn inaktiv)",
|
||||||
"long_description": "Zeigt, wie viel Zeit im aktuellen teuren Zeitraum noch verbleibt. Der State wird in Stunden angezeigt (z. B. 0,75 h) für einfaches Ablesen in Dashboards, während das Attribut `remaining_minutes` dieselbe Zeit in Minuten liefert (z. B. 45) für Automationsbedingungen. **Countdown-Timer**: Dieser Wert dekrementiert jede Minute während eines aktiven Zeitraums. Gibt 0 zurück, wenn kein teurer Zeitraum aktiv ist. Aktualisiert sich minütlich.",
|
"long_description": "Zeigt, wie viele Minuten im aktuellen teuren Zeitraum noch verbleiben. Gibt 0 zurück, wenn kein Zeitraum aktiv ist. Aktualisiert sich jede Minute. Prüfe binary_sensor.peak_price_period, um zu sehen, ob ein Zeitraum aktuell aktiv ist.",
|
||||||
"usage_tips": "Für Automationen: Nutze Attribut `remaining_minutes` wie 'Wenn remaining_minutes > 60, setze Heizung auf Energiesparmodus' oder 'Wenn remaining_minutes < 15, erhöhe Temperatur wieder'. UI zeigt benutzerfreundliche Stunden (z. B. 1,25 h). Wert 0 zeigt an, dass kein teurer Zeitraum aktiv ist."
|
"usage_tips": "Nutze in Automatisierungen: 'Wenn remaining_minutes > 60, breche aufgeschobene Ladesitzung ab'. Wert 0 macht es einfach zu unterscheiden zwischen aktivem (Wert > 0) und inaktivem (Wert = 0) Zeitraum."
|
||||||
},
|
},
|
||||||
"peak_price_progress": {
|
"peak_price_progress": {
|
||||||
"description": "Fortschritt durch aktuellen teuren Zeitraum (0% wenn inaktiv)",
|
"description": "Fortschritt durch aktuellen teuren Zeitraum (0% wenn inaktiv)",
|
||||||
|
|
@ -372,9 +359,9 @@
|
||||||
"usage_tips": "Immer nützlich für Planung: 'Nächster teurer Zeitraum startet in 2 Stunden'. Automatisierung: 'Wenn nächste Startzeit in 30 Minuten ist, reduziere Heiztemperatur vorsorglich'."
|
"usage_tips": "Immer nützlich für Planung: 'Nächster teurer Zeitraum startet in 2 Stunden'. Automatisierung: 'Wenn nächste Startzeit in 30 Minuten ist, reduziere Heiztemperatur vorsorglich'."
|
||||||
},
|
},
|
||||||
"peak_price_next_in_minutes": {
|
"peak_price_next_in_minutes": {
|
||||||
"description": "Zeit bis zum nächsten teuren Zeitraum",
|
"description": "Minuten bis nächster teurer Zeitraum startet (0 beim Übergang)",
|
||||||
"long_description": "Zeigt, wie lange es bis zum nächsten teuren Zeitraum dauert. Der State wird in Stunden angezeigt (z. B. 2,25 h) für Dashboards, während das Attribut `next_in_minutes` Minuten bereitstellt (z. B. 135) für Automationsbedingungen. Während eines aktiven Zeitraums zeigt dies die Zeit bis zum Zeitraum nach dem aktuellen. Gibt 0 während kurzer Übergangsphasen zurück. Aktualisiert sich jede Minute.",
|
"long_description": "Zeigt Minuten bis der nächste teure Zeitraum startet. Während eines aktiven Zeitraums zeigt dies die Zeit bis zum Zeitraum nach dem aktuellen. Gibt 0 während kurzer Übergangsphasen zurück. Aktualisiert sich jede Minute.",
|
||||||
"usage_tips": "Für Automationen: Attribut `next_in_minutes` nutzen wie 'Wenn next_in_minutes > 0 UND next_in_minutes < 10, reduziere Heizung vorsorglich bevor der teure Zeitraum beginnt'. Wert > 0 zeigt immer an, dass ein zukünftiger teurer Zeitraum geplant ist."
|
"usage_tips": "Präventive Automatisierung: 'Wenn next_in_minutes > 0 UND next_in_minutes < 10, beende aktuellen Ladezyklus jetzt, bevor die Preise steigen'."
|
||||||
},
|
},
|
||||||
"home_type": {
|
"home_type": {
|
||||||
"description": "Art der Wohnung (Wohnung, Haus usw.)",
|
"description": "Art der Wohnung (Wohnung, Haus usw.)",
|
||||||
|
|
@ -450,11 +437,6 @@
|
||||||
"description": "Datenexport für Dashboard-Integrationen",
|
"description": "Datenexport für Dashboard-Integrationen",
|
||||||
"long_description": "Dieser Sensor ruft den get_chartdata-Service mit deiner konfigurierten YAML-Konfiguration auf und stellt das Ergebnis als Entity-Attribute bereit. Der Status zeigt 'ready' wenn Daten verfügbar sind, 'error' bei Fehlern, oder 'pending' vor dem ersten Aufruf. Perfekt für Dashboard-Integrationen wie ApexCharts, die Preisdaten aus Entity-Attributen lesen.",
|
"long_description": "Dieser Sensor ruft den get_chartdata-Service mit deiner konfigurierten YAML-Konfiguration auf und stellt das Ergebnis als Entity-Attribute bereit. Der Status zeigt 'ready' wenn Daten verfügbar sind, 'error' bei Fehlern, oder 'pending' vor dem ersten Aufruf. Perfekt für Dashboard-Integrationen wie ApexCharts, die Preisdaten aus Entity-Attributen lesen.",
|
||||||
"usage_tips": "Konfiguriere die YAML-Parameter in den Integrationsoptionen entsprechend deinem get_chartdata-Service-Aufruf. Der Sensor aktualisiert automatisch bei Preisdaten-Updates (typischerweise nach Mitternacht und wenn morgige Daten eintreffen). Greife auf die Service-Response-Daten direkt über die Entity-Attribute zu - die Struktur entspricht exakt dem, was get_chartdata zurückgibt."
|
"usage_tips": "Konfiguriere die YAML-Parameter in den Integrationsoptionen entsprechend deinem get_chartdata-Service-Aufruf. Der Sensor aktualisiert automatisch bei Preisdaten-Updates (typischerweise nach Mitternacht und wenn morgige Daten eintreffen). Greife auf die Service-Response-Daten direkt über die Entity-Attribute zu - die Struktur entspricht exakt dem, was get_chartdata zurückgibt."
|
||||||
},
|
|
||||||
"chart_metadata": {
|
|
||||||
"description": "Leichtgewichtige Metadaten für Diagrammkonfiguration",
|
|
||||||
"long_description": "Liefert wesentliche Diagrammkonfigurationswerte als Sensor-Attribute. Nützlich für jede Diagrammkarte, die Y-Achsen-Grenzen benötigt. Der Sensor ruft get_chartdata im Nur-Metadaten-Modus auf (keine Datenverarbeitung) und extrahiert: yaxis_min, yaxis_max (vorgeschlagener Y-Achsenbereich für optimale Skalierung). Der Status spiegelt das Service-Call-Ergebnis wider: 'ready' bei Erfolg, 'error' bei Fehler, 'pending' während der Initialisierung.",
|
|
||||||
"usage_tips": "Konfiguriere über configuration.yaml unter tibber_prices.chart_metadata_config (optional: day, subunit_currency, resolution). Der Sensor aktualisiert sich automatisch bei Preisdatenänderungen. Greife auf Metadaten aus Attributen zu: yaxis_min, yaxis_max. Verwende mit config-template-card oder jedem Tool, das Entity-Attribute liest - perfekt für dynamische Diagrammkonfiguration ohne manuelle Berechnungen."
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"binary_sensor": {
|
"binary_sensor": {
|
||||||
|
|
@ -489,80 +471,6 @@
|
||||||
"usage_tips": "Verwende dies, um zu überprüfen, ob Echtzeit-Verbrauchsdaten verfügbar sind. Aktiviere Benachrichtigungen, falls dies unerwartet auf 'Aus' wechselt, was auf potenzielle Hardware- oder Verbindungsprobleme hinweist."
|
"usage_tips": "Verwende dies, um zu überprüfen, ob Echtzeit-Verbrauchsdaten verfügbar sind. Aktiviere Benachrichtigungen, falls dies unerwartet auf 'Aus' wechselt, was auf potenzielle Hardware- oder Verbindungsprobleme hinweist."
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"number": {
|
|
||||||
"best_price_flex_override": {
|
|
||||||
"description": "Maximaler Prozentsatz über dem Tagesminimumpreis, den Intervalle haben können und trotzdem als 'Bestpreis' gelten. Empfohlen: 15-20 mit Lockerung aktiviert (Standard), oder 25-35 ohne Lockerung. Maximum: 50 (Obergrenze für zuverlässige Periodenerkennung).",
|
|
||||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Flexibilität' aus dem Optionen-Dialog für die Bestpreis-Periodenberechnung.",
|
|
||||||
"usage_tips": "Aktiviere diese Entität, um die Bestpreiserkennung dynamisch über Automatisierungen anzupassen, z.B. höhere Flexibilität bei kritischen Lasten oder engere Anforderungen für flexible Geräte."
|
|
||||||
},
|
|
||||||
"best_price_min_distance_override": {
|
|
||||||
"description": "Minimaler prozentualer Abstand unter dem Tagesdurchschnitt. Intervalle müssen so weit unter dem Durchschnitt liegen, um als 'Bestpreis' zu gelten. Hilft, echte Niedrigpreis-Perioden von durchschnittlichen Preisen zu unterscheiden.",
|
|
||||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestabstand' aus dem Optionen-Dialog für die Bestpreis-Periodenberechnung.",
|
|
||||||
"usage_tips": "Erhöhe den Wert, wenn du strengere Bestpreis-Kriterien möchtest. Verringere ihn, wenn zu wenige Perioden erkannt werden."
|
|
||||||
},
|
|
||||||
"best_price_min_period_length_override": {
|
|
||||||
"description": "Minimale Periodenl\u00e4nge in 15-Minuten-Intervallen. Perioden kürzer als diese werden nicht gemeldet. Beispiel: 2 = mindestens 30 Minuten.",
|
|
||||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestperiodenlänge' aus dem Optionen-Dialog für die Bestpreis-Periodenberechnung.",
|
|
||||||
"usage_tips": "Passe an die typische Laufzeit deiner Geräte an: 2 (30 Min) für Schnellprogramme, 4-8 (1-2 Std) für normale Zyklen, 8+ für lange ECO-Programme."
|
|
||||||
},
|
|
||||||
"best_price_min_periods_override": {
|
|
||||||
"description": "Minimale Anzahl an Bestpreis-Perioden, die täglich gefunden werden sollen. Wenn Lockerung aktiviert ist, wird das System die Kriterien automatisch anpassen, um diese Zahl zu erreichen.",
|
|
||||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestperioden' aus dem Optionen-Dialog für die Bestpreis-Periodenberechnung.",
|
|
||||||
"usage_tips": "Setze dies auf die Anzahl zeitkritischer Aufgaben, die du täglich hast. Beispiel: 2 für zwei Waschmaschinenladungen."
|
|
||||||
},
|
|
||||||
"best_price_relaxation_attempts_override": {
|
|
||||||
"description": "Anzahl der Versuche, die Kriterien schrittweise zu lockern, um die Mindestperiodenanzahl zu erreichen. Jeder Versuch erhöht die Flexibilität um 3 Prozent. Bei 0 werden nur Basis-Kriterien verwendet.",
|
|
||||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Lockerungsversuche' aus dem Optionen-Dialog für die Bestpreis-Periodenberechnung.",
|
|
||||||
"usage_tips": "Höhere Werte machen die Periodenerkennung anpassungsfähiger an Tage mit stabilen Preisen. Setze auf 0, um strenge Kriterien ohne Lockerung zu erzwingen."
|
|
||||||
},
|
|
||||||
"best_price_gap_count_override": {
|
|
||||||
"description": "Maximale Anzahl teurerer Intervalle, die zwischen günstigen Intervallen erlaubt sind und trotzdem als eine zusammenhängende Periode gelten. Bei 0 müssen günstige Intervalle aufeinander folgen.",
|
|
||||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Lückentoleranz' aus dem Optionen-Dialog für die Bestpreis-Periodenberechnung.",
|
|
||||||
"usage_tips": "Erhöhe dies für Geräte mit variabler Last (z.B. Wärmepumpen), die kurze teurere Intervalle tolerieren können. Setze auf 0 für kontinuierliche günstige Perioden."
|
|
||||||
},
|
|
||||||
"peak_price_flex_override": {
|
|
||||||
"description": "Maximaler Prozentsatz unter dem Tagesmaximumpreis, den Intervalle haben können und trotzdem als 'Spitzenpreis' gelten. Gleiche Empfehlungen wie für Bestpreis-Flexibilität.",
|
|
||||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Flexibilität' aus dem Optionen-Dialog für die Spitzenpreis-Periodenberechnung.",
|
|
||||||
"usage_tips": "Nutze dies, um den Spitzenpreis-Schwellenwert zur Laufzeit für Automatisierungen anzupassen, die den Verbrauch während teurer Stunden vermeiden."
|
|
||||||
},
|
|
||||||
"peak_price_min_distance_override": {
|
|
||||||
"description": "Minimaler prozentualer Abstand über dem Tagesdurchschnitt. Intervalle müssen so weit über dem Durchschnitt liegen, um als 'Spitzenpreis' zu gelten.",
|
|
||||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestabstand' aus dem Optionen-Dialog für die Spitzenpreis-Periodenberechnung.",
|
|
||||||
"usage_tips": "Erhöhe den Wert, um nur extreme Preisspitzen zu erfassen. Verringere ihn, um mehr Hochpreiszeiten einzubeziehen."
|
|
||||||
},
|
|
||||||
"peak_price_min_period_length_override": {
|
|
||||||
"description": "Minimale Periodenl\u00e4nge in 15-Minuten-Intervallen für Spitzenpreise. Kürzere Preisspitzen werden nicht als Perioden gemeldet.",
|
|
||||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestperiodenlänge' aus dem Optionen-Dialog für die Spitzenpreis-Periodenberechnung.",
|
|
||||||
"usage_tips": "Kürzere Werte erfassen kurze Preisspitzen. Längere Werte fokussieren auf anhaltende Hochpreisphasen."
|
|
||||||
},
|
|
||||||
"peak_price_min_periods_override": {
|
|
||||||
"description": "Minimale Anzahl an Spitzenpreis-Perioden, die täglich gefunden werden sollen.",
|
|
||||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestperioden' aus dem Optionen-Dialog für die Spitzenpreis-Periodenberechnung.",
|
|
||||||
"usage_tips": "Setze dies basierend darauf, wie viele Hochpreisphasen du pro Tag für Automatisierungen erfassen möchtest."
|
|
||||||
},
|
|
||||||
"peak_price_relaxation_attempts_override": {
|
|
||||||
"description": "Anzahl der Versuche, die Kriterien zu lockern, um die Mindestanzahl an Spitzenpreis-Perioden zu erreichen.",
|
|
||||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Lockerungsversuche' aus dem Optionen-Dialog für die Spitzenpreis-Periodenberechnung.",
|
|
||||||
"usage_tips": "Erhöhe dies, wenn an Tagen mit stabilen Preisen keine Perioden gefunden werden. Setze auf 0, um strenge Kriterien zu erzwingen."
|
|
||||||
},
|
|
||||||
"peak_price_gap_count_override": {
|
|
||||||
"description": "Maximale Anzahl günstigerer Intervalle, die zwischen teuren Intervallen erlaubt sind und trotzdem als eine Spitzenpreis-Periode gelten.",
|
|
||||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Lückentoleranz' aus dem Optionen-Dialog für die Spitzenpreis-Periodenberechnung.",
|
|
||||||
"usage_tips": "Höhere Werte erfassen längere Hochpreisphasen auch mit kurzen Preiseinbrüchen. Setze auf 0, um strikt zusammenhängende Spitzenpreise zu erfassen."
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"switch": {
|
|
||||||
"best_price_enable_relaxation_override": {
|
|
||||||
"description": "Wenn aktiviert, werden die Kriterien automatisch gelockert, um die Mindestperiodenanzahl zu erreichen. Wenn deaktiviert, werden nur Perioden gemeldet, die die strengen Kriterien erfüllen (möglicherweise null Perioden bei stabilen Preisen).",
|
|
||||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestanzahl erreichen' aus dem Optionen-Dialog für die Bestpreis-Periodenberechnung.",
|
|
||||||
"usage_tips": "Aktiviere dies für garantierte tägliche Automatisierungsmöglichkeiten. Deaktiviere es, wenn du nur wirklich günstige Zeiträume willst, auch wenn das bedeutet, dass an manchen Tagen keine Perioden gefunden werden."
|
|
||||||
},
|
|
||||||
"peak_price_enable_relaxation_override": {
|
|
||||||
"description": "Wenn aktiviert, werden die Kriterien automatisch gelockert, um die Mindestperiodenanzahl zu erreichen. Wenn deaktiviert, werden nur echte Preisspitzen gemeldet.",
|
|
||||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestanzahl erreichen' aus dem Optionen-Dialog für die Spitzenpreis-Periodenberechnung.",
|
|
||||||
"usage_tips": "Aktiviere dies für konsistente Spitzenpreis-Warnungen. Deaktiviere es, um nur extreme Preisspitzen zu erfassen."
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"home_types": {
|
"home_types": {
|
||||||
"APARTMENT": "Wohnung",
|
"APARTMENT": "Wohnung",
|
||||||
"ROWHOUSE": "Reihenhaus",
|
"ROWHOUSE": "Reihenhaus",
|
||||||
|
|
|
||||||
|
|
@ -1,20 +1,7 @@
|
||||||
{
|
{
|
||||||
"apexcharts": {
|
"apexcharts": {
|
||||||
"title_rating_level": "Price Phases Daily Progress",
|
"title_rating_level": "Price Phases Daily Progress",
|
||||||
"title_level": "Price Level",
|
"title_level": "Price Level"
|
||||||
"hourly_suffix": "(Ø hourly)",
|
|
||||||
"best_price_period_name": "Best Price Period",
|
|
||||||
"peak_price_period_name": "Peak Price Period",
|
|
||||||
"notification": {
|
|
||||||
"metadata_sensor_unavailable": {
|
|
||||||
"title": "Tibber Prices: ApexCharts YAML Generated with Limited Functionality",
|
|
||||||
"message": "You just generated an ApexCharts card configuration via Developer Tools. The Chart Metadata sensor is currently disabled, so the generated YAML will only show **basic functionality** (auto-scale axis, fixed gradient at 50%).\n\n**To enable full functionality** (optimized scaling, dynamic gradient colors):\n1. [Open Tibber Prices Integration](https://my.home-assistant.io/redirect/integration/?domain=tibber_prices)\n2. Enable the 'Chart Metadata' sensor\n3. **Generate the YAML again** via Developer Tools\n4. **Replace the old YAML** in your dashboard with the new version\n\n⚠️ Simply enabling the sensor is not enough - you must regenerate and replace the YAML code!"
|
|
||||||
},
|
|
||||||
"missing_cards": {
|
|
||||||
"title": "Tibber Prices: ApexCharts YAML Cannot Be Used",
|
|
||||||
"message": "You just generated an ApexCharts card configuration via Developer Tools, but the generated YAML **will not work** because required custom cards are missing.\n\n**Missing cards:**\n{cards}\n\n**To use the generated YAML:**\n1. Click the links above to install the missing cards from HACS\n2. Restart Home Assistant (sometimes needed)\n3. **Generate the YAML again** via Developer Tools\n4. Add the YAML to your dashboard\n\n⚠️ The current YAML code will not work until all cards are installed!"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
"sensor": {
|
"sensor": {
|
||||||
"current_interval_price": {
|
"current_interval_price": {
|
||||||
|
|
@ -22,9 +9,9 @@
|
||||||
"long_description": "Shows the current price per kWh from your Tibber subscription",
|
"long_description": "Shows the current price per kWh from your Tibber subscription",
|
||||||
"usage_tips": "Use this to track prices or to create automations that run when electricity is cheap"
|
"usage_tips": "Use this to track prices or to create automations that run when electricity is cheap"
|
||||||
},
|
},
|
||||||
"current_interval_price_base": {
|
"current_interval_price_major": {
|
||||||
"description": "Current electricity price in base currency (EUR/kWh, NOK/kWh, etc.) for Energy Dashboard",
|
"description": "Current electricity price in major currency (EUR/kWh, NOK/kWh, etc.) for Energy Dashboard",
|
||||||
"long_description": "Shows the current price per kWh in base currency units (e.g., EUR/kWh instead of ct/kWh, NOK/kWh instead of øre/kWh). This sensor is specifically designed for use with Home Assistant's Energy Dashboard, which requires prices in standard currency units.",
|
"long_description": "Shows the current price per kWh in major currency units (e.g., EUR/kWh instead of ct/kWh, NOK/kWh instead of øre/kWh). This sensor is specifically designed for use with Home Assistant's Energy Dashboard, which requires prices in standard currency units.",
|
||||||
"usage_tips": "Use this sensor when configuring the Energy Dashboard under Settings → Dashboards → Energy. Select this sensor as the 'Entity with current price' to automatically calculate your energy costs. The Energy Dashboard multiplies your energy consumption (kWh) by this price to show total costs."
|
"usage_tips": "Use this sensor when configuring the Energy Dashboard under Settings → Dashboards → Energy. Select this sensor as the 'Entity with current price' to automatically calculate your energy costs. The Energy Dashboard multiplies your energy consumption (kWh) by this price to show total costs."
|
||||||
},
|
},
|
||||||
"next_interval_price": {
|
"next_interval_price": {
|
||||||
|
|
@ -58,9 +45,9 @@
|
||||||
"usage_tips": "Use this to avoid running appliances during peak price times"
|
"usage_tips": "Use this to avoid running appliances during peak price times"
|
||||||
},
|
},
|
||||||
"average_price_today": {
|
"average_price_today": {
|
||||||
"description": "The typical electricity price for today per kWh (configurable display format)",
|
"description": "The average electricity price for today per kWh",
|
||||||
"long_description": "Shows the typical price per kWh for today. **By default, the state displays the median** (resistant to extreme spikes, showing what you can generally expect). You can change this in the integration options to show the arithmetic mean instead. The alternate value is always available as attribute `price_mean` or `price_median` for automations.",
|
"long_description": "Shows the average price per kWh for the current day from your Tibber subscription",
|
||||||
"usage_tips": "Use the state value for display. For exact cost calculations in automations, use: {{ state_attr('sensor.average_price_today', 'price_mean') }}"
|
"usage_tips": "Use this as a baseline for comparing current prices"
|
||||||
},
|
},
|
||||||
"lowest_price_tomorrow": {
|
"lowest_price_tomorrow": {
|
||||||
"description": "The lowest electricity price for tomorrow per kWh",
|
"description": "The lowest electricity price for tomorrow per kWh",
|
||||||
|
|
@ -73,9 +60,9 @@
|
||||||
"usage_tips": "Use this to avoid running appliances during tomorrow's peak price times. Helpful for planning around expensive periods."
|
"usage_tips": "Use this to avoid running appliances during tomorrow's peak price times. Helpful for planning around expensive periods."
|
||||||
},
|
},
|
||||||
"average_price_tomorrow": {
|
"average_price_tomorrow": {
|
||||||
"description": "The typical electricity price for tomorrow per kWh (configurable display format)",
|
"description": "The average electricity price for tomorrow per kWh",
|
||||||
"long_description": "Shows the typical price per kWh for tomorrow. **By default, the state displays the median** (resistant to extreme spikes). You can change this in the integration options to show the arithmetic mean instead. The alternate value is available as attribute. This sensor becomes unavailable until tomorrow's data is published by Tibber (typically around 13:00-14:00 CET).",
|
"long_description": "Shows the average price per kWh for tomorrow from your Tibber subscription. This sensor becomes unavailable until tomorrow's data is published by Tibber (typically around 13:00-14:00 CET).",
|
||||||
"usage_tips": "Use this to plan tomorrow's energy consumption. For cost calculations, use: {{ state_attr('sensor.average_price_tomorrow', 'price_mean') }}"
|
"usage_tips": "Use this as a baseline for comparing tomorrow's prices and planning consumption. Compare with today's average to see if tomorrow will be more or less expensive overall."
|
||||||
},
|
},
|
||||||
"yesterday_price_level": {
|
"yesterday_price_level": {
|
||||||
"description": "Aggregated price level for yesterday",
|
"description": "Aggregated price level for yesterday",
|
||||||
|
|
@ -108,14 +95,14 @@
|
||||||
"usage_tips": "Use this to plan tomorrow's energy consumption based on your personalized price thresholds. Compare with today to decide if you should shift consumption to tomorrow or use energy today."
|
"usage_tips": "Use this to plan tomorrow's energy consumption based on your personalized price thresholds. Compare with today to decide if you should shift consumption to tomorrow or use energy today."
|
||||||
},
|
},
|
||||||
"trailing_price_average": {
|
"trailing_price_average": {
|
||||||
"description": "The typical electricity price for the past 24 hours per kWh (configurable display format)",
|
"description": "The average electricity price for the past 24 hours per kWh",
|
||||||
"long_description": "Shows the typical price per kWh for the past 24 hours. **By default, the state displays the median** (resistant to extreme spikes, showing what price level was typical). You can change this in the integration options to show the arithmetic mean instead. The alternate value is available as attribute. Updates every 15 minutes.",
|
"long_description": "Shows the average price per kWh calculated from the past 24 hours (trailing average) from your Tibber subscription. This provides a rolling average that updates every 15 minutes based on historical data.",
|
||||||
"usage_tips": "Use the state value to see the typical recent price level. For cost calculations, use: {{ state_attr('sensor.trailing_price_average', 'price_mean') }}"
|
"usage_tips": "Use this to compare current prices against recent trends. A current price significantly above this average may indicate a good time to reduce consumption."
|
||||||
},
|
},
|
||||||
"leading_price_average": {
|
"leading_price_average": {
|
||||||
"description": "The typical electricity price for the next 24 hours per kWh (configurable display format)",
|
"description": "The average electricity price for the next 24 hours per kWh",
|
||||||
"long_description": "Shows the typical price per kWh for the next 24 hours. **By default, the state displays the median** (resistant to extreme spikes, showing what price level to expect). You can change this in the integration options to show the arithmetic mean instead. The alternate value is available as attribute.",
|
"long_description": "Shows the average price per kWh calculated from the next 24 hours (leading average) from your Tibber subscription. This provides a forward-looking average based on available forecast data.",
|
||||||
"usage_tips": "Use the state value to see the typical upcoming price level. For cost calculations, use: {{ state_attr('sensor.leading_price_average', 'price_mean') }}"
|
"usage_tips": "Use this to plan energy usage. If the current price is below the leading average, it may be a good time to run energy-intensive appliances."
|
||||||
},
|
},
|
||||||
"trailing_price_min": {
|
"trailing_price_min": {
|
||||||
"description": "The minimum electricity price for the past 24 hours per kWh",
|
"description": "The minimum electricity price for the past 24 hours per kWh",
|
||||||
|
|
@ -292,24 +279,24 @@
|
||||||
"long_description": "Shows the timestamp of the latest available price data interval from your Tibber subscription"
|
"long_description": "Shows the timestamp of the latest available price data interval from your Tibber subscription"
|
||||||
},
|
},
|
||||||
"today_volatility": {
|
"today_volatility": {
|
||||||
"description": "How much electricity prices change throughout today",
|
"description": "Price volatility classification for today",
|
||||||
"long_description": "Indicates whether today's prices are stable or have big swings. Low volatility means prices stay fairly consistent—timing doesn't matter much. High volatility means significant price differences throughout the day—great opportunity to shift consumption to cheaper periods. Check `price_coefficient_variation_%` for the variance percentage and `price_spread` for the absolute price span.",
|
"long_description": "Shows how much electricity prices vary throughout today based on the spread (difference between highest and lowest price). Classification: LOW = spread < 5ct, MODERATE = 5-15ct, HIGH = 15-30ct, VERY HIGH = >30ct.",
|
||||||
"usage_tips": "Use this to decide if optimization is worth your effort. On low-volatility days, you can run devices anytime. On high-volatility days, following Best Price periods saves meaningful money."
|
"usage_tips": "Use this to decide if price-based optimization is worthwhile. For example, with a balcony battery that has 15% efficiency losses, optimization only makes sense when volatility is at least MODERATE. Create automations that check volatility before scheduling charging/discharging cycles."
|
||||||
},
|
},
|
||||||
"tomorrow_volatility": {
|
"tomorrow_volatility": {
|
||||||
"description": "How much electricity prices will change tomorrow",
|
"description": "Price volatility classification for tomorrow",
|
||||||
"long_description": "Indicates whether tomorrow's prices will be stable or have big swings. Available once tomorrow's data is published (typically 13:00-14:00 CET). Low volatility means prices stay fairly consistent—timing isn't critical. High volatility means significant price differences throughout the day—good opportunity for scheduling energy-intensive activities. Check `price_coefficient_variation_%` for the variance percentage and `price_spread` for the absolute price span.",
|
"long_description": "Shows how much electricity prices will vary throughout tomorrow based on the spread (difference between highest and lowest price). Becomes unavailable until tomorrow's data is published (typically 13:00-14:00 CET).",
|
||||||
"usage_tips": "Use for planning tomorrow's energy consumption. High volatility? Schedule flexible loads during Best Price periods. Low volatility? Run devices whenever is convenient."
|
"usage_tips": "Use this for advance planning of tomorrow's energy usage. If tomorrow has HIGH or VERY HIGH volatility, it's worth optimizing energy consumption timing. If LOW, you can run devices anytime without significant cost differences."
|
||||||
},
|
},
|
||||||
"next_24h_volatility": {
|
"next_24h_volatility": {
|
||||||
"description": "How much prices will change over the next 24 hours",
|
"description": "Price volatility classification for the rolling next 24 hours",
|
||||||
"long_description": "Indicates price volatility for a rolling 24-hour window from now (updates every 15 minutes). Low volatility means prices stay fairly consistent. High volatility means significant price swings offer optimization opportunities. Unlike today/tomorrow sensors, this crosses day boundaries and provides a continuous forward-looking assessment. Check `price_coefficient_variation_%` for the variance percentage and `price_spread` for the absolute price span.",
|
"long_description": "Shows how much electricity prices vary in the next 24 hours from now (rolling window). This crosses day boundaries and updates every 15 minutes, providing a forward-looking volatility assessment independent of calendar days.",
|
||||||
"usage_tips": "Best for real-time decisions. Use when planning battery charging strategies or other flexible loads that might span across midnight. Provides consistent 24h perspective regardless of calendar day."
|
"usage_tips": "Best sensor for real-time optimization decisions. Unlike today/tomorrow sensors that switch at midnight, this provides continuous 24h volatility assessment. Use for battery charging strategies that span across day boundaries."
|
||||||
},
|
},
|
||||||
"today_tomorrow_volatility": {
|
"today_tomorrow_volatility": {
|
||||||
"description": "Combined price volatility across today and tomorrow",
|
"description": "Combined price volatility classification for today and tomorrow",
|
||||||
"long_description": "Shows overall price volatility when considering both today and tomorrow together (when available). Indicates whether there are significant price differences across the day boundary. Falls back to today-only when tomorrow's data isn't available yet. Useful for understanding multi-day optimization opportunities. Check `price_coefficient_variation_%` for the variance percentage and `price_spread` for the absolute price span.",
|
"long_description": "Shows volatility across both today and tomorrow combined (when tomorrow's data is available). Provides an extended view of price variation spanning up to 48 hours. Falls back to today-only when tomorrow's data isn't available yet.",
|
||||||
"usage_tips": "Use for planning tasks that span multiple days. Check if prices vary enough to make scheduling worthwhile. The individual day volatility sensors show breakdown per day if you need more detail."
|
"usage_tips": "Use this for multi-day planning and to understand if price opportunities exist across the day boundary. The 'today_volatility' and 'tomorrow_volatility' breakdown attributes show individual day contributions. Useful for scheduling charging sessions that might span midnight."
|
||||||
},
|
},
|
||||||
"data_lifecycle_status": {
|
"data_lifecycle_status": {
|
||||||
"description": "Current state of price data lifecycle and caching",
|
"description": "Current state of price data lifecycle and caching",
|
||||||
|
|
@ -322,14 +309,14 @@
|
||||||
"usage_tips": "Use this to display a countdown like 'Cheap period ends in 2 hours' (when active) or 'Next cheap period ends at 14:00' (when inactive). Home Assistant automatically shows relative time for timestamp sensors."
|
"usage_tips": "Use this to display a countdown like 'Cheap period ends in 2 hours' (when active) or 'Next cheap period ends at 14:00' (when inactive). Home Assistant automatically shows relative time for timestamp sensors."
|
||||||
},
|
},
|
||||||
"best_price_period_duration": {
|
"best_price_period_duration": {
|
||||||
"description": "Total length of current or next best price period",
|
"description": "Total length of current or next best price period in minutes",
|
||||||
"long_description": "Shows how long the best price period lasts in total. The state is displayed in hours (e.g., 1.5 h) for easy reading in the UI, while the `period_duration_minutes` attribute provides the same value in minutes (e.g., 90) for use in automations. During an active period, shows the duration of the current period. When no period is active, shows the duration of the next upcoming period. Returns 'Unknown' only when no periods are configured.",
|
"long_description": "Shows how long the best price period lasts in total. During an active period, shows the duration of the current period. When no period is active, shows the duration of the next upcoming period. Returns 'Unknown' only when no periods are configured.",
|
||||||
"usage_tips": "For display: Use the state value (hours) in dashboards. For automations: Use `period_duration_minutes` attribute to check if there's enough time for long-running tasks (e.g., 'If period_duration_minutes >= 90, start washing machine')."
|
"usage_tips": "Useful for planning: 'The next cheap period lasts 90 minutes' or 'Current cheap period is 120 minutes long'. Combine with remaining_minutes to calculate when to start long-running appliances."
|
||||||
},
|
},
|
||||||
"best_price_remaining_minutes": {
|
"best_price_remaining_minutes": {
|
||||||
"description": "Time remaining in current best price period",
|
"description": "Minutes remaining in current best price period (0 when inactive)",
|
||||||
"long_description": "Shows how much time is left in the current best price period. The state displays in hours (e.g., 0.5 h) for easy reading, while the `remaining_minutes` attribute provides minutes (e.g., 30) for automation logic. Returns 0 when no period is active. Updates every minute. Check binary_sensor.best_price_period to see if a period is currently active.",
|
"long_description": "Shows how many minutes are left in the current best price period. Returns 0 when no period is active. Updates every minute. Check binary_sensor.best_price_period to see if a period is currently active.",
|
||||||
"usage_tips": "For automations: Use `remaining_minutes` attribute with numeric comparisons like 'If remaining_minutes > 0 AND remaining_minutes < 30, start washing machine now'. The value 0 makes it easy to check if a period is active (value > 0) or not (value = 0)."
|
"usage_tips": "Perfect for automations: 'If remaining_minutes > 0 AND remaining_minutes < 30, start washing machine now'. The value 0 makes it easy to check if a period is active (value > 0) or not (value = 0)."
|
||||||
},
|
},
|
||||||
"best_price_progress": {
|
"best_price_progress": {
|
||||||
"description": "Progress through current best price period (0% when inactive)",
|
"description": "Progress through current best price period (0% when inactive)",
|
||||||
|
|
@ -342,9 +329,9 @@
|
||||||
"usage_tips": "Always useful for planning ahead: 'Next cheap period starts in 3 hours' (whether you're in a period now or not). Combine with automations: 'When next start time is in 10 minutes, send notification to prepare washing machine'."
|
"usage_tips": "Always useful for planning ahead: 'Next cheap period starts in 3 hours' (whether you're in a period now or not). Combine with automations: 'When next start time is in 10 minutes, send notification to prepare washing machine'."
|
||||||
},
|
},
|
||||||
"best_price_next_in_minutes": {
|
"best_price_next_in_minutes": {
|
||||||
"description": "Time until next best price period starts",
|
"description": "Minutes until next best price period starts (0 when in transition)",
|
||||||
"long_description": "Shows how long until the next best price period starts. The state displays in hours (e.g., 2.25 h) for dashboards, while the `next_in_minutes` attribute provides minutes (e.g., 135) for automation conditions. During an active period, shows time until the period AFTER the current one. Returns 0 during brief transition moments. Updates every minute.",
|
"long_description": "Shows minutes until the next best price period starts. During an active period, shows time until the period AFTER the current one. Returns 0 during brief transition moments. Updates every minute.",
|
||||||
"usage_tips": "For automations: Use `next_in_minutes` attribute like 'If next_in_minutes > 0 AND next_in_minutes < 15, wait before starting dishwasher'. Value > 0 always indicates a future period is scheduled."
|
"usage_tips": "Perfect for 'wait until cheap period' automations: 'If next_in_minutes > 0 AND next_in_minutes < 15, wait before starting dishwasher'. Value > 0 always indicates a future period is scheduled."
|
||||||
},
|
},
|
||||||
"peak_price_end_time": {
|
"peak_price_end_time": {
|
||||||
"description": "When the current or next peak price period ends",
|
"description": "When the current or next peak price period ends",
|
||||||
|
|
@ -352,14 +339,14 @@
|
||||||
"usage_tips": "Use this to display 'Expensive period ends in 1 hour' (when active) or 'Next expensive period ends at 18:00' (when inactive). Combine with automations to resume operations after peak."
|
"usage_tips": "Use this to display 'Expensive period ends in 1 hour' (when active) or 'Next expensive period ends at 18:00' (when inactive). Combine with automations to resume operations after peak."
|
||||||
},
|
},
|
||||||
"peak_price_period_duration": {
|
"peak_price_period_duration": {
|
||||||
"description": "Total length of current or next peak price period",
|
"description": "Total length of current or next peak price period in minutes",
|
||||||
"long_description": "Shows how long the peak price period lasts in total. The state is displayed in hours (e.g., 0.75 h) for easy reading in the UI, while the `period_duration_minutes` attribute provides the same value in minutes (e.g., 45) for use in automations. During an active period, shows the duration of the current period. When no period is active, shows the duration of the next upcoming period. Returns 'Unknown' only when no periods are configured.",
|
"long_description": "Shows how long the peak price period lasts in total. During an active period, shows the duration of the current period. When no period is active, shows the duration of the next upcoming period. Returns 'Unknown' only when no periods are configured.",
|
||||||
"usage_tips": "For display: Use the state value (hours) in dashboards. For automations: Use `period_duration_minutes` attribute to decide whether to wait out the peak or proceed (e.g., 'If period_duration_minutes <= 60, pause operations')."
|
"usage_tips": "Useful for planning: 'The next expensive period lasts 60 minutes' or 'Current peak is 90 minutes long'. Combine with remaining_minutes to decide whether to wait out the peak or proceed with operations."
|
||||||
},
|
},
|
||||||
"peak_price_remaining_minutes": {
|
"peak_price_remaining_minutes": {
|
||||||
"description": "Time remaining in current peak price period",
|
"description": "Minutes remaining in current peak price period (0 when inactive)",
|
||||||
"long_description": "Shows how much time is left in the current peak price period. The state displays in hours (e.g., 1.0 h) for easy reading, while the `remaining_minutes` attribute provides minutes (e.g., 60) for automation logic. Returns 0 when no period is active. Updates every minute. Check binary_sensor.peak_price_period to see if a period is currently active.",
|
"long_description": "Shows how many minutes are left in the current peak price period. Returns 0 when no period is active. Updates every minute. Check binary_sensor.peak_price_period to see if a period is currently active.",
|
||||||
"usage_tips": "For automations: Use `remaining_minutes` attribute like 'If remaining_minutes > 60, cancel deferred charging session'. Value 0 makes it easy to distinguish active (value > 0) from inactive (value = 0) periods."
|
"usage_tips": "Use in automations: 'If remaining_minutes > 60, cancel deferred charging session'. Value 0 makes it easy to distinguish active (value > 0) from inactive (value = 0) periods."
|
||||||
},
|
},
|
||||||
"peak_price_progress": {
|
"peak_price_progress": {
|
||||||
"description": "Progress through current peak price period (0% when inactive)",
|
"description": "Progress through current peak price period (0% when inactive)",
|
||||||
|
|
@ -372,9 +359,9 @@
|
||||||
"usage_tips": "Always useful for planning: 'Next expensive period starts in 2 hours'. Automation: 'When next start time is in 30 minutes, reduce heating temperature preemptively'."
|
"usage_tips": "Always useful for planning: 'Next expensive period starts in 2 hours'. Automation: 'When next start time is in 30 minutes, reduce heating temperature preemptively'."
|
||||||
},
|
},
|
||||||
"peak_price_next_in_minutes": {
|
"peak_price_next_in_minutes": {
|
||||||
"description": "Time until next peak price period starts",
|
"description": "Minutes until next peak price period starts (0 when in transition)",
|
||||||
"long_description": "Shows how long until the next peak price period starts. The state displays in hours (e.g., 0.5 h) for dashboards, while the `next_in_minutes` attribute provides minutes (e.g., 30) for automation conditions. During an active period, shows time until the period AFTER the current one. Returns 0 during brief transition moments. Updates every minute.",
|
"long_description": "Shows minutes until the next peak price period starts. During an active period, shows time until the period AFTER the current one. Returns 0 during brief transition moments. Updates every minute.",
|
||||||
"usage_tips": "For automations: Use `next_in_minutes` attribute like 'If next_in_minutes > 0 AND next_in_minutes < 10, complete current charging cycle now before prices increase'."
|
"usage_tips": "Pre-emptive automation: 'If next_in_minutes > 0 AND next_in_minutes < 10, complete current charging cycle now before prices increase'."
|
||||||
},
|
},
|
||||||
"home_type": {
|
"home_type": {
|
||||||
"description": "Type of home (apartment, house, etc.)",
|
"description": "Type of home (apartment, house, etc.)",
|
||||||
|
|
@ -445,16 +432,6 @@
|
||||||
"description": "Status of your Tibber subscription",
|
"description": "Status of your Tibber subscription",
|
||||||
"long_description": "Shows whether your Tibber subscription is currently running, has ended, or is pending activation. A status of 'running' means you're actively receiving electricity through Tibber.",
|
"long_description": "Shows whether your Tibber subscription is currently running, has ended, or is pending activation. A status of 'running' means you're actively receiving electricity through Tibber.",
|
||||||
"usage_tips": "Use this to monitor your subscription status. Set up alerts if status changes from 'running' to ensure uninterrupted service."
|
"usage_tips": "Use this to monitor your subscription status. Set up alerts if status changes from 'running' to ensure uninterrupted service."
|
||||||
},
|
|
||||||
"chart_data_export": {
|
|
||||||
"description": "Data export for dashboard integrations",
|
|
||||||
"long_description": "This binary sensor calls the get_chartdata service with your configured YAML parameters and exposes the result as entity attributes. The state is 'on' when the service call succeeds and data is available, 'off' when the call fails or no configuration is set. Perfect for dashboard integrations like ApexCharts that need to read price data from entity attributes.",
|
|
||||||
"usage_tips": "Configure the YAML parameters in the integration options to match your get_chartdata service call. The sensor will automatically refresh when price data updates (typically after midnight and when tomorrow's data arrives). Access the service response data directly from the entity's attributes - the structure matches exactly what get_chartdata returns."
|
|
||||||
},
|
|
||||||
"chart_metadata": {
|
|
||||||
"description": "Lightweight metadata for chart configuration",
|
|
||||||
"long_description": "Provides essential chart configuration values as sensor attributes. Useful for any chart card that needs Y-axis bounds. The sensor calls get_chartdata with metadata-only mode (no data processing) and extracts: yaxis_min, yaxis_max (suggested Y-axis range for optimal scaling). The state reflects the service call result: 'ready' when successful, 'error' on failure, 'pending' during initialization.",
|
|
||||||
"usage_tips": "Configure via configuration.yaml under tibber_prices.chart_metadata_config (optional: day, subunit_currency, resolution). The sensor automatically refreshes when price data updates. Access metadata from attributes: yaxis_min, yaxis_max. Use with config-template-card or any tool that reads entity attributes - perfect for dynamic chart configuration without manual calculations."
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"binary_sensor": {
|
"binary_sensor": {
|
||||||
|
|
@ -487,80 +464,11 @@
|
||||||
"description": "Whether realtime consumption monitoring is active",
|
"description": "Whether realtime consumption monitoring is active",
|
||||||
"long_description": "Indicates if realtime electricity consumption monitoring is enabled and active for your Tibber home. This requires compatible metering hardware (e.g., Tibber Pulse) and an active subscription.",
|
"long_description": "Indicates if realtime electricity consumption monitoring is enabled and active for your Tibber home. This requires compatible metering hardware (e.g., Tibber Pulse) and an active subscription.",
|
||||||
"usage_tips": "Use this to verify that realtime consumption data is available. Enable notifications if this changes to 'off' unexpectedly, indicating potential hardware or connectivity issues."
|
"usage_tips": "Use this to verify that realtime consumption data is available. Enable notifications if this changes to 'off' unexpectedly, indicating potential hardware or connectivity issues."
|
||||||
}
|
|
||||||
},
|
|
||||||
"number": {
|
|
||||||
"best_price_flex_override": {
|
|
||||||
"description": "Maximum above the daily minimum price that intervals can be and still qualify as 'best price'. Recommended: 15-20 with relaxation enabled (default), or 25-35 without relaxation. Maximum: 50 (hard cap for reliable period detection).",
|
|
||||||
"long_description": "When this entity is enabled, its value overrides the 'Flexibility' setting from the options flow for best price period calculations.",
|
|
||||||
"usage_tips": "Enable this entity to dynamically adjust best price detection via automations. Higher values create longer periods, lower values are stricter."
|
|
||||||
},
|
},
|
||||||
"best_price_min_distance_override": {
|
"chart_data_export": {
|
||||||
"description": "Ensures periods are significantly cheaper than the daily average, not just marginally below it. This filters out noise and prevents marking slightly-below-average periods as 'best price' on days with flat prices. Higher values = stricter filtering (only truly cheap periods qualify).",
|
"description": "Data export for dashboard integrations",
|
||||||
"long_description": "When this entity is enabled, its value overrides the 'Minimum Distance' setting from the options flow for best price period calculations.",
|
"long_description": "This binary sensor calls the get_chartdata service with your configured YAML parameters and exposes the result as entity attributes. The state is 'on' when the service call succeeds and data is available, 'off' when the call fails or no configuration is set. Perfect for dashboard integrations like ApexCharts that need to read price data from entity attributes.",
|
||||||
"usage_tips": "Use in automations to adjust how much better than average the best price periods must be. Higher values require prices to be further below average."
|
"usage_tips": "Configure the YAML parameters in the integration options to match your get_chartdata service call. The sensor will automatically refresh when price data updates (typically after midnight and when tomorrow's data arrives). Access the service response data directly from the entity's attributes - the structure matches exactly what get_chartdata returns."
|
||||||
},
|
|
||||||
"best_price_min_period_length_override": {
|
|
||||||
"description": "Minimum duration for a period to be considered as 'best price'. Longer periods are more practical for running appliances like dishwashers or heat pumps. Best price periods require 60 minutes minimum (vs. 30 minutes for peak price warnings) because they should provide meaningful time windows for consumption planning.",
|
|
||||||
"long_description": "When this entity is enabled, its value overrides the 'Minimum Period Length' setting from the options flow for best price period calculations.",
|
|
||||||
"usage_tips": "Increase when your appliances need longer uninterrupted run times (e.g., washing machines, dishwashers)."
|
|
||||||
},
|
|
||||||
"best_price_min_periods_override": {
|
|
||||||
"description": "Minimum number of best price periods to aim for per day. Filters will be relaxed step-by-step to try achieving this count. Only active when 'Achieve Minimum Count' is enabled.",
|
|
||||||
"long_description": "When this entity is enabled, its value overrides the 'Minimum Periods' setting from the options flow for best price period calculations.",
|
|
||||||
"usage_tips": "Adjust dynamically based on how many times per day you need cheap electricity windows."
|
|
||||||
},
|
|
||||||
"best_price_relaxation_attempts_override": {
|
|
||||||
"description": "How many flex levels (attempts) to try before giving up. Each attempt runs all filter combinations at the new flex level. More attempts increase the chance of finding additional periods at the cost of longer processing time.",
|
|
||||||
"long_description": "When this entity is enabled, its value overrides the 'Relaxation Attempts' setting from the options flow for best price period calculations.",
|
|
||||||
"usage_tips": "Increase when periods are hard to find. Decrease for stricter price filtering."
|
|
||||||
},
|
|
||||||
"best_price_gap_count_override": {
|
|
||||||
"description": "Maximum number of consecutive intervals allowed that deviate by exactly one level step from the required level. This prevents periods from being split by occasional level deviations. Gap tolerance requires periods ≥90 minutes (6 intervals) to detect outliers effectively.",
|
|
||||||
"long_description": "When this entity is enabled, its value overrides the 'Gap Tolerance' setting from the options flow for best price period calculations.",
|
|
||||||
"usage_tips": "Increase to allow longer periods with occasional price spikes. Keep low for stricter continuous cheap periods."
|
|
||||||
},
|
|
||||||
"peak_price_flex_override": {
|
|
||||||
"description": "Maximum below the daily maximum price that intervals can be and still qualify as 'peak price'. Recommended: -15 to -20 with relaxation enabled (default), or -25 to -35 without relaxation. Maximum: -50 (hard cap for reliable period detection). Note: Negative values indicate distance below maximum.",
|
|
||||||
"long_description": "When this entity is enabled, its value overrides the 'Flexibility' setting from the options flow for peak price period calculations.",
|
|
||||||
"usage_tips": "Enable this entity to dynamically adjust peak price detection via automations. Higher values create longer peak periods."
|
|
||||||
},
|
|
||||||
"peak_price_min_distance_override": {
|
|
||||||
"description": "Ensures periods are significantly more expensive than the daily average, not just marginally above it. This filters out noise and prevents marking slightly-above-average periods as 'peak price' on days with flat prices. Higher values = stricter filtering (only truly expensive periods qualify).",
|
|
||||||
"long_description": "When this entity is enabled, its value overrides the 'Minimum Distance' setting from the options flow for peak price period calculations.",
|
|
||||||
"usage_tips": "Use in automations to adjust how much higher than average the peak price periods must be."
|
|
||||||
},
|
|
||||||
"peak_price_min_period_length_override": {
|
|
||||||
"description": "Minimum duration for a period to be considered as 'peak price'. Peak price warnings are allowed for shorter periods (30 minutes minimum vs. 60 minutes for best price) because brief expensive spikes are worth alerting about, even if they're too short for consumption planning.",
|
|
||||||
"long_description": "When this entity is enabled, its value overrides the 'Minimum Period Length' setting from the options flow for peak price period calculations.",
|
|
||||||
"usage_tips": "Increase to filter out brief price spikes, focusing on sustained expensive periods."
|
|
||||||
},
|
|
||||||
"peak_price_min_periods_override": {
|
|
||||||
"description": "Minimum number of peak price periods to aim for per day. Filters will be relaxed step-by-step to try achieving this count. Only active when 'Achieve Minimum Count' is enabled.",
|
|
||||||
"long_description": "When this entity is enabled, its value overrides the 'Minimum Periods' setting from the options flow for peak price period calculations.",
|
|
||||||
"usage_tips": "Adjust based on how many peak periods you want to identify and avoid."
|
|
||||||
},
|
|
||||||
"peak_price_relaxation_attempts_override": {
|
|
||||||
"description": "How many flex levels (attempts) to try before giving up. Each attempt runs all filter combinations at the new flex level. More attempts increase the chance of finding additional peak periods at the cost of longer processing time.",
|
|
||||||
"long_description": "When this entity is enabled, its value overrides the 'Relaxation Attempts' setting from the options flow for peak price period calculations.",
|
|
||||||
"usage_tips": "Increase when peak periods are hard to detect. Decrease for stricter peak price filtering."
|
|
||||||
},
|
|
||||||
"peak_price_gap_count_override": {
|
|
||||||
"description": "Maximum number of consecutive intervals allowed that deviate by exactly one level step from the required level. This prevents periods from being split by occasional level deviations. Gap tolerance requires periods ≥90 minutes (6 intervals) to detect outliers effectively.",
|
|
||||||
"long_description": "When this entity is enabled, its value overrides the 'Gap Tolerance' setting from the options flow for peak price period calculations.",
|
|
||||||
"usage_tips": "Increase to identify sustained expensive periods with brief dips. Keep low for stricter continuous peak detection."
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"switch": {
|
|
||||||
"best_price_enable_relaxation_override": {
|
|
||||||
"description": "When enabled, filters will be gradually relaxed if not enough periods are found. This attempts to reach the desired minimum number of periods, which may include less optimal time windows as best-price periods.",
|
|
||||||
"long_description": "When this entity is enabled, its value overrides the 'Achieve Minimum Count' setting from the options flow for best price period calculations.",
|
|
||||||
"usage_tips": "Turn OFF to disable relaxation and use strict filtering only. Turn ON to allow the algorithm to relax criteria to find more periods."
|
|
||||||
},
|
|
||||||
"peak_price_enable_relaxation_override": {
|
|
||||||
"description": "When enabled, filters will be gradually relaxed if not enough periods are found. This attempts to reach the desired minimum number of periods to ensure you're warned about expensive periods even on days with unusual price patterns.",
|
|
||||||
"long_description": "When this entity is enabled, its value overrides the 'Achieve Minimum Count' setting from the options flow for peak price period calculations.",
|
|
||||||
"usage_tips": "Turn OFF to disable relaxation and use strict filtering only. Turn ON to allow the algorithm to relax criteria to find more peak periods."
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"home_types": {
|
"home_types": {
|
||||||
|
|
|
||||||
|
|
@ -1,20 +1,7 @@
|
||||||
{
|
{
|
||||||
"apexcharts": {
|
"apexcharts": {
|
||||||
"title_rating_level": "Prisfaser dagsfremdrift",
|
"title_rating_level": "Prisfaser daglig fremgang",
|
||||||
"title_level": "Prisnivå",
|
"title_level": "Prisnivå"
|
||||||
"hourly_suffix": "(Ø per time)",
|
|
||||||
"best_price_period_name": "Beste prisperiode",
|
|
||||||
"peak_price_period_name": "Toppprisperiode",
|
|
||||||
"notification": {
|
|
||||||
"metadata_sensor_unavailable": {
|
|
||||||
"title": "Tibber Prices: ApexCharts YAML generert med begrenset funksjonalitet",
|
|
||||||
"message": "Du har nettopp generert en ApexCharts-kort-konfigurasjon via Utviklerverktøy. Diagram-metadata-sensoren er deaktivert, så den genererte YAML-en vil bare vise **grunnleggende funksjonalitet** (auto-skalering, fast gradient på 50%).\n\n**For full funksjonalitet** (optimert skalering, dynamiske gradientfarger):\n1. [Åpne Tibber Prices-integrasjonen](https://my.home-assistant.io/redirect/integration/?domain=tibber_prices)\n2. Aktiver 'Chart Metadata'-sensoren\n3. **Generer YAML-en på nytt** via Utviklerverktøy\n4. **Erstatt den gamle YAML-en** i dashbordet ditt med den nye versjonen\n\n⚠️ Det er ikke nok å bare aktivere sensoren - du må regenerere og erstatte YAML-koden!"
|
|
||||||
},
|
|
||||||
"missing_cards": {
|
|
||||||
"title": "Tibber Prices: ApexCharts YAML kan ikke brukes",
|
|
||||||
"message": "Du har nettopp generert en ApexCharts-kort-konfigurasjon via Utviklerverktøy, men den genererte YAML-en **vil ikke fungere** fordi nødvendige tilpassede kort mangler.\n\n**Manglende kort:**\n{cards}\n\n**For å bruke den genererte YAML-en:**\n1. Klikk på lenkene ovenfor for å installere de manglende kortene fra HACS\n2. Start Home Assistant på nytt (noen ganger nødvendig)\n3. **Generer YAML-en på nytt** via Utviklerverktøy\n4. Legg til YAML-en i dashbordet ditt\n\n⚠️ Den nåværende YAML-koden vil ikke fungere før alle kort er installert!"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
"sensor": {
|
"sensor": {
|
||||||
"current_interval_price": {
|
"current_interval_price": {
|
||||||
|
|
@ -22,7 +9,7 @@
|
||||||
"long_description": "Viser nåværende pris per kWh fra ditt Tibber-abonnement",
|
"long_description": "Viser nåværende pris per kWh fra ditt Tibber-abonnement",
|
||||||
"usage_tips": "Bruk dette til å spore priser eller lage automatiseringer som kjører når strøm er billig"
|
"usage_tips": "Bruk dette til å spore priser eller lage automatiseringer som kjører når strøm er billig"
|
||||||
},
|
},
|
||||||
"current_interval_price_base": {
|
"current_interval_price_major": {
|
||||||
"description": "Nåværende elektrisitetspris i hovedvaluta (EUR/kWh, NOK/kWh, osv.) for Energi-dashboard",
|
"description": "Nåværende elektrisitetspris i hovedvaluta (EUR/kWh, NOK/kWh, osv.) for Energi-dashboard",
|
||||||
"long_description": "Viser nåværende pris per kWh i hovedvalutaenheter (f.eks. EUR/kWh i stedet for ct/kWh, NOK/kWh i stedet for øre/kWh). Denne sensoren er spesielt designet for bruk med Home Assistants Energi-dashboard, som krever priser i standard valutaenheter.",
|
"long_description": "Viser nåværende pris per kWh i hovedvalutaenheter (f.eks. EUR/kWh i stedet for ct/kWh, NOK/kWh i stedet for øre/kWh). Denne sensoren er spesielt designet for bruk med Home Assistants Energi-dashboard, som krever priser i standard valutaenheter.",
|
||||||
"usage_tips": "Bruk denne sensoren når du konfigurerer Energi-dashboardet under Innstillinger → Dashbord → Energi. Velg denne sensoren som 'Entitet med nåværende pris' for automatisk å beregne energikostnadene. Energi-dashboardet multipliserer energiforbruket ditt (kWh) med denne prisen for å vise totale kostnader."
|
"usage_tips": "Bruk denne sensoren når du konfigurerer Energi-dashboardet under Innstillinger → Dashbord → Energi. Velg denne sensoren som 'Entitet med nåværende pris' for automatisk å beregne energikostnadene. Energi-dashboardet multipliserer energiforbruket ditt (kWh) med denne prisen for å vise totale kostnader."
|
||||||
|
|
@ -58,9 +45,9 @@
|
||||||
"usage_tips": "Bruk dette til å unngå å kjøre apparater i toppristider"
|
"usage_tips": "Bruk dette til å unngå å kjøre apparater i toppristider"
|
||||||
},
|
},
|
||||||
"average_price_today": {
|
"average_price_today": {
|
||||||
"description": "Typisk elektrisitetspris i dag per kWh (konfigurerbart visningsformat)",
|
"description": "Den gjennomsnittlige elektrisitetsprisen i dag per kWh",
|
||||||
"long_description": "Viser prisen per kWh for gjeldende dag fra ditt Tibber-abonnement. **Som standard viser statusen medianen** (motstandsdyktig mot ekstreme prisspiss, viser typisk prisnivå). Du kan endre dette i integrasjonsinnstillingene for å vise det aritmetiske gjennomsnittet i stedet. Den alternative verdien er tilgjengelig som attributt.",
|
"long_description": "Viser gjennomsnittsprisen per kWh for gjeldende dag fra ditt Tibber-abonnement",
|
||||||
"usage_tips": "Bruk dette som baseline for å sammenligne nåværende priser. For beregninger bruk: {{ state_attr('sensor.average_price_today', 'price_mean') }}"
|
"usage_tips": "Bruk dette som en baseline for å sammenligne nåværende priser"
|
||||||
},
|
},
|
||||||
"lowest_price_tomorrow": {
|
"lowest_price_tomorrow": {
|
||||||
"description": "Den laveste elektrisitetsprisen i morgen per kWh",
|
"description": "Den laveste elektrisitetsprisen i morgen per kWh",
|
||||||
|
|
@ -73,9 +60,9 @@
|
||||||
"usage_tips": "Bruk dette til å unngå å kjøre apparater i morgendagens toppristider. Nyttig for å planlegge rundt dyre perioder."
|
"usage_tips": "Bruk dette til å unngå å kjøre apparater i morgendagens toppristider. Nyttig for å planlegge rundt dyre perioder."
|
||||||
},
|
},
|
||||||
"average_price_tomorrow": {
|
"average_price_tomorrow": {
|
||||||
"description": "Typisk elektrisitetspris i morgen per kWh (konfigurerbart visningsformat)",
|
"description": "Den gjennomsnittlige elektrisitetsprisen i morgen per kWh",
|
||||||
"long_description": "Viser prisen per kWh for morgendagen fra ditt Tibber-abonnement. **Som standard viser statusen medianen** (motstandsdyktig mot ekstreme prisspiss). Du kan endre dette i integrasjonsinnstillingene for å vise det aritmetiske gjennomsnittet i stedet. Den alternative verdien er tilgjengelig som attributt. Denne sensoren blir utilgjengelig inntil morgendagens data er publisert av Tibber (vanligvis rundt 13:00-14:00 CET).",
|
"long_description": "Viser gjennomsnittsprisen per kWh for morgendagen fra ditt Tibber-abonnement. Denne sensoren blir utilgjengelig inntil morgendagens data er publisert av Tibber (vanligvis rundt 13:00-14:00 CET).",
|
||||||
"usage_tips": "Bruk dette som baseline for å sammenligne morgendagens priser og planlegge forbruk. Sammenlign med dagens median for å se om morgendagen vil være mer eller mindre dyr totalt sett."
|
"usage_tips": "Bruk dette som en baseline for å sammenligne morgendagens priser og planlegge forbruk. Sammenlign med dagens gjennomsnitt for å se om morgendagen vil være mer eller mindre dyr totalt sett."
|
||||||
},
|
},
|
||||||
"yesterday_price_level": {
|
"yesterday_price_level": {
|
||||||
"description": "Aggregert prisnivå for i går",
|
"description": "Aggregert prisnivå for i går",
|
||||||
|
|
@ -108,14 +95,14 @@
|
||||||
"usage_tips": "Bruk dette for å planlegge morgendagens energiforbruk basert på dine personlige pristerskelverdier. Sammenlign med i dag for å bestemme om du skal flytte forbruk til i morgen eller bruke energi i dag."
|
"usage_tips": "Bruk dette for å planlegge morgendagens energiforbruk basert på dine personlige pristerskelverdier. Sammenlign med i dag for å bestemme om du skal flytte forbruk til i morgen eller bruke energi i dag."
|
||||||
},
|
},
|
||||||
"trailing_price_average": {
|
"trailing_price_average": {
|
||||||
"description": "Typisk elektrisitetspris for de siste 24 timene per kWh (konfigurerbart visningsformat)",
|
"description": "Den gjennomsnittlige elektrisitetsprisen for de siste 24 timene per kWh",
|
||||||
"long_description": "Viser prisen per kWh beregnet fra de siste 24 timene. **Som standard viser statusen medianen** (motstandsdyktig mot ekstreme prisspiss, viser typisk prisnivå). Du kan endre dette i integrasjonsinnstillingene for å vise det aritmetiske gjennomsnittet i stedet. Den alternative verdien er tilgjengelig som attributt. Oppdateres hvert 15. minutt.",
|
"long_description": "Viser gjennomsnittsprisen per kWh beregnet fra de siste 24 timene (glidende gjennomsnitt) fra ditt Tibber-abonnement. Dette gir et rullende gjennomsnitt som oppdateres hvert 15. minutt basert på historiske data.",
|
||||||
"usage_tips": "Bruk statusverdien for å se det typiske nåværende prisnivået. For kostnadsberegninger bruk: {{ state_attr('sensor.trailing_price_average', 'price_mean') }}"
|
"usage_tips": "Bruk dette til å sammenligne nåværende priser mot nylige trender. En nåværende pris betydelig over dette gjennomsnittet kan indikere et godt tidspunkt å redusere forbruket."
|
||||||
},
|
},
|
||||||
"leading_price_average": {
|
"leading_price_average": {
|
||||||
"description": "Typisk elektrisitetspris for de neste 24 timene per kWh (konfigurerbart visningsformat)",
|
"description": "Den gjennomsnittlige elektrisitetsprisen for de neste 24 timene per kWh",
|
||||||
"long_description": "Viser prisen per kWh beregnet fra de neste 24 timene. **Som standard viser statusen medianen** (motstandsdyktig mot ekstreme prisspiss, viser forventet prisnivå). Du kan endre dette i integrasjonsinnstillingene for å vise det aritmetiske gjennomsnittet i stedet. Den alternative verdien er tilgjengelig som attributt.",
|
"long_description": "Viser gjennomsnittsprisen per kWh beregnet fra de neste 24 timene (fremtidsrettet gjennomsnitt) fra ditt Tibber-abonnement. Dette gir et fremtidsrettet gjennomsnitt basert på tilgjengelige prognosedata.",
|
||||||
"usage_tips": "Bruk statusverdien for å se det typiske kommende prisnivået. For kostnadsberegninger bruk: {{ state_attr('sensor.leading_price_average', 'price_mean') }}"
|
"usage_tips": "Bruk dette til å planlegge energibruk. Hvis nåværende pris er under det fremtidsrettede gjennomsnittet, kan det være et godt tidspunkt å kjøre energikrevende apparater."
|
||||||
},
|
},
|
||||||
"trailing_price_min": {
|
"trailing_price_min": {
|
||||||
"description": "Den minste elektrisitetsprisen for de siste 24 timene per kWh",
|
"description": "Den minste elektrisitetsprisen for de siste 24 timene per kWh",
|
||||||
|
|
@ -292,24 +279,24 @@
|
||||||
"long_description": "Viser tidsstempelet for siste tilgjengelige prisdataintervall fra ditt Tibber-abonnement"
|
"long_description": "Viser tidsstempelet for siste tilgjengelige prisdataintervall fra ditt Tibber-abonnement"
|
||||||
},
|
},
|
||||||
"today_volatility": {
|
"today_volatility": {
|
||||||
"description": "Hvor mye strømprisene endrer seg i dag",
|
"description": "Prisvolatilitetsklassifisering for i dag",
|
||||||
"long_description": "Viser om dagens priser er stabile eller har store svingninger. Lav volatilitet betyr ganske jevne priser – timing betyr lite. Høy volatilitet betyr tydelige prisforskjeller gjennom dagen – en god sjanse til å flytte forbruk til billigere perioder. `price_coefficient_variation_%` viser prosentverdien, `price_spread` viser den absolutte prisspennet.",
|
"long_description": "Viser hvor mye strømprisene varierer gjennom dagen basert på spredningen (forskjellen mellom høyeste og laveste pris). Klassifisering: LOW = spredning < 5øre, MODERATE = 5-15øre, HIGH = 15-30øre, VERY HIGH = >30øre.",
|
||||||
"usage_tips": "Bruk dette for å avgjøre om optimalisering er verdt innsatsen. Ved lav volatilitet kan du kjøre enheter når som helst. Ved høy volatilitet sparer du merkbart ved å følge Best Price-perioder."
|
"usage_tips": "Bruk dette til å bestemme om prisbasert optimalisering er verdt det. For eksempel, med et balkongbatteri som har 15% effektivitetstap, er optimalisering kun meningsfull når volatiliteten er minst MODERATE. Opprett automatiseringer som sjekker volatilitet før planlegging av lade-/utladingssykluser."
|
||||||
},
|
},
|
||||||
"tomorrow_volatility": {
|
"tomorrow_volatility": {
|
||||||
"description": "Hvor mye strømprisene vil endre seg i morgen",
|
"description": "Prisvolatilitetsklassifisering for i morgen",
|
||||||
"long_description": "Viser om prisene i morgen blir stabile eller får store svingninger. Tilgjengelig når morgendagens data er publisert (vanligvis 13:00–14:00 CET). Lav volatilitet betyr jevne priser – timing er ikke kritisk. Høy volatilitet betyr tydelige prisforskjeller gjennom dagen – en god mulighet til å planlegge energikrevende oppgaver. `price_coefficient_variation_%` viser prosentverdien, `price_spread` viser den absolutte prisspennet.",
|
"long_description": "Viser hvor mye strømprisene vil variere gjennom morgendagen basert på spredningen (forskjellen mellom høyeste og laveste pris). Blir utilgjengelig til morgendagens data er publisert (typisk 13:00-14:00 CET).",
|
||||||
"usage_tips": "Bruk dette til å planlegge morgendagens forbruk. Høy volatilitet? Planlegg fleksible laster i Best Price-perioder. Lav volatilitet? Kjør enheter når det passer deg."
|
"usage_tips": "Bruk dette til forhåndsplanlegging av morgendagens energiforbruk. Hvis morgendagen har HIGH eller VERY HIGH volatilitet, er det verdt å optimalisere tidspunktet for energiforbruk. Hvis LOW, kan du kjøre enheter når som helst uten betydelige kostnadsforskjeller."
|
||||||
},
|
},
|
||||||
"next_24h_volatility": {
|
"next_24h_volatility": {
|
||||||
"description": "Hvor mye prisene endrer seg de neste 24 timene",
|
"description": "Prisvolatilitetsklassifisering for de rullerende neste 24 timene",
|
||||||
"long_description": "Viser prisvolatilitet for et rullerende 24-timers vindu fra nå (oppdateres hvert 15. minutt). Lav volatilitet betyr jevne priser. Høy volatilitet betyr merkbare prissvingninger og mulighet for optimalisering. I motsetning til i dag/i morgen-sensorer krysser denne daggrenser og gir en kontinuerlig fremoverskuende vurdering. `price_coefficient_variation_%` viser prosentverdien, `price_spread` viser den absolutte prisspennet.",
|
"long_description": "Viser hvor mye strømprisene varierer i de neste 24 timene fra nå (rullerende vindu). Dette krysser daggrenser og oppdateres hvert 15. minutt, og gir en fremoverskuende volatilitetsvurdering uavhengig av kalenderdager.",
|
||||||
"usage_tips": "Best for beslutninger i sanntid. Bruk når du planlegger batterilading eller andre fleksible laster som kan gå over midnatt. Gir et konsistent 24t-bilde uavhengig av kalenderdag."
|
"usage_tips": "Beste sensor for sanntids optimaliseringsbeslutninger. I motsetning til dagens/morgendagens sensorer som bytter ved midnatt, gir denne kontinuerlig 24t volatilitetsvurdering. Bruk til batteriladingsstrategier som spenner over daggrenser."
|
||||||
},
|
},
|
||||||
"today_tomorrow_volatility": {
|
"today_tomorrow_volatility": {
|
||||||
"description": "Kombinert prisvolatilitet for i dag og i morgen",
|
"description": "Kombinert prisvolatilitetsklassifisering for i dag og i morgen",
|
||||||
"long_description": "Viser samlet volatilitet når i dag og i morgen sees sammen (når morgendata er tilgjengelig). Viser om det finnes klare prisforskjeller over dagsgrensen. Faller tilbake til kun i dag hvis morgendata mangler. Nyttig for flerdagers optimalisering. `price_coefficient_variation_%` viser prosentverdien, `price_spread` viser den absolutte prisspennet.",
|
"long_description": "Viser volatilitet på tvers av både i dag og i morgen kombinert (når morgendagens data er tilgjengelig). Gir en utvidet visning av prisvariasjoner som spenner over opptil 48 timer. Faller tilbake til bare i dag når morgendagens data ikke er tilgjengelig ennå.",
|
||||||
"usage_tips": "Bruk for oppgaver som går over flere dager. Sjekk om prisforskjellene er store nok til å planlegge etter. De enkelte dagssensorene viser bidrag per dag om du trenger mer detalj."
|
"usage_tips": "Bruk dette for flersdagers planlegging og for å forstå om prismuligheter eksisterer på tvers av dags grensen. Attributtene 'today_volatility' og 'tomorrow_volatility' viser individuelle dagbidrag. Nyttig for planlegging av ladeøkter som kan strekke seg over midnatt."
|
||||||
},
|
},
|
||||||
"data_lifecycle_status": {
|
"data_lifecycle_status": {
|
||||||
"description": "Gjeldende tilstand for prisdatalivssyklus og hurtigbufring",
|
"description": "Gjeldende tilstand for prisdatalivssyklus og hurtigbufring",
|
||||||
|
|
@ -317,49 +304,39 @@
|
||||||
"usage_tips": "Bruk denne diagnosesensoren for å forstå dataferskhet og API-anropsmønstre. Sjekk 'cache_age'-attributtet for å se hvor gamle de nåværende dataene er. Overvåk 'next_api_poll' for å vite når neste oppdatering er planlagt. Bruk 'data_completeness' for å se om data for i går/i dag/i morgen er tilgjengelig. 'api_calls_today'-telleren hjelper med å spore API-bruk. Perfekt for feilsøking eller forståelse av integrasjonens oppførsel."
|
"usage_tips": "Bruk denne diagnosesensoren for å forstå dataferskhet og API-anropsmønstre. Sjekk 'cache_age'-attributtet for å se hvor gamle de nåværende dataene er. Overvåk 'next_api_poll' for å vite når neste oppdatering er planlagt. Bruk 'data_completeness' for å se om data for i går/i dag/i morgen er tilgjengelig. 'api_calls_today'-telleren hjelper med å spore API-bruk. Perfekt for feilsøking eller forståelse av integrasjonens oppførsel."
|
||||||
},
|
},
|
||||||
"best_price_end_time": {
|
"best_price_end_time": {
|
||||||
"description": "Total lengde på nåværende eller neste billigperiode (state i timer, attributt i minutter)",
|
"description": "Når gjeldende eller neste billigperiode slutter",
|
||||||
"long_description": "Viser hvor lenge billigperioden varer. State bruker timer (desimal) for lesbar UI; attributtet `period_duration_minutes` beholder avrundede minutter for automasjoner. Aktiv → varighet for gjeldende periode, ellers neste.",
|
"long_description": "Viser sluttidspunktet for gjeldende billigperiode når aktiv, eller slutten av neste periode når ingen periode er aktiv. Viser alltid en nyttig tidsreferanse for planlegging. Returnerer 'Ukjent' bare når ingen perioder er konfigurert.",
|
||||||
"usage_tips": "UI kan vise 1,5 t mens `period_duration_minutes` = 90 for automasjoner."
|
"usage_tips": "Bruk dette til å vise en nedtelling som 'Billigperiode slutter om 2 timer' (når aktiv) eller 'Neste billigperiode slutter kl 14:00' (når inaktiv). Home Assistant viser automatisk relativ tid for tidsstempelsensorer."
|
||||||
},
|
|
||||||
"best_price_period_duration": {
|
|
||||||
"description": "Lengde på gjeldende/neste billigperiode",
|
|
||||||
"long_description": "Total varighet av gjeldende eller neste billigperiode. State vises i timer (f.eks. 1,5 t) for enkel lesing i UI, mens attributtet `period_duration_minutes` gir samme verdi i minutter (f.eks. 90) for automasjoner. Denne verdien representerer den **fulle planlagte varigheten** av perioden og er konstant gjennom hele perioden, selv om gjenværende tid (remaining_minutes) reduseres.",
|
|
||||||
"usage_tips": "Kombiner med remaining_minutes for å beregne når langvarige enheter skal stoppes: Perioden startet for `period_duration_minutes - remaining_minutes` minutter siden. Dette attributtet støtter energioptimeringsstrategier ved å hjelpe til med å planlegge høyforbruksaktiviteter innenfor billige perioder."
|
|
||||||
},
|
},
|
||||||
"best_price_remaining_minutes": {
|
"best_price_remaining_minutes": {
|
||||||
"description": "Gjenværende tid i gjeldende billigperiode",
|
"description": "Gjenværende minutter i gjeldende billigperiode (0 når inaktiv)",
|
||||||
"long_description": "Viser hvor mye tid som gjenstår i gjeldende billigperiode. State vises i timer (f.eks. 0,75 t) for enkel lesing i dashboards, mens attributtet `remaining_minutes` gir samme tid i minutter (f.eks. 45) for automasjonsbetingelser. **Nedtellingstimer**: Denne verdien reduseres hvert minutt under en aktiv periode. Returnerer 0 når ingen billigperiode er aktiv. Oppdateres hvert minutt.",
|
"long_description": "Viser hvor mange minutter som er igjen i gjeldende billigperiode. Returnerer 0 når ingen periode er aktiv. Oppdateres hvert minutt. Sjekk binary_sensor.best_price_period for å se om en periode er aktiv.",
|
||||||
"usage_tips": "For automasjoner: Bruk attributtet `remaining_minutes` som 'Hvis remaining_minutes > 60, start oppvaskmaskinen nå (nok tid til å fullføre)' eller 'Hvis remaining_minutes < 15, fullfør gjeldende syklus snart'. UI viser brukervennlige timer (f.eks. 1,25 t). Verdi 0 indikerer ingen aktiv billigperiode."
|
"usage_tips": "Perfekt for automatiseringer: 'Hvis remaining_minutes > 0 OG remaining_minutes < 30, start vaskemaskin nå'. Verdien 0 gjør det enkelt å sjekke om en periode er aktiv (verdi > 0) eller ikke (verdi = 0)."
|
||||||
},
|
},
|
||||||
"best_price_progress": {
|
"best_price_progress": {
|
||||||
"description": "Fremdrift gjennom gjeldende billigperiode (0% når inaktiv)",
|
"description": "Fremdrift gjennom gjeldende billigperiode (0% når inaktiv)",
|
||||||
"long_description": "Viser fremdrift gjennom gjeldende billigperiode som 0-100%. Returnerer 0% når ingen periode er aktiv. Oppdateres hvert minutt. 0% betyr perioden nettopp startet, 100% betyr den slutter snart.",
|
"long_description": "Viser fremdrift gjennom gjeldende billigperiode som 0-100%. Returnerer 0% når ingen periode er aktiv. Oppdateres hvert minutt. 0% betyr periode nettopp startet, 100% betyr den snart slutter.",
|
||||||
"usage_tips": "Flott for visuelle fremgangsindikatorer. Bruk i automatiseringer: 'Hvis progress > 0 OG progress > 75, send varsel om at billigperioden snart slutter'. Verdi 0 indikerer ingen aktiv periode."
|
"usage_tips": "Flott for visuelle fremdriftslinjer. Bruk i automatiseringer: 'Hvis progress > 0 OG progress > 75, send varsel om at billigperiode snart slutter'. Verdi 0 indikerer ingen aktiv periode."
|
||||||
},
|
},
|
||||||
"best_price_next_start_time": {
|
"best_price_next_start_time": {
|
||||||
"description": "Total lengde på nåværende eller neste dyr-periode (state i timer, attributt i minutter)",
|
"description": "Når neste billigperiode starter",
|
||||||
"long_description": "Viser hvor lenge den dyre perioden varer. State bruker timer (desimal) for UI; attributtet `period_duration_minutes` beholder avrundede minutter for automasjoner. Aktiv → varighet for gjeldende periode, ellers neste.",
|
"long_description": "Viser når neste kommende billigperiode starter. Under en aktiv periode viser dette starten av NESTE periode etter den gjeldende. Returnerer 'Ukjent' bare når ingen fremtidige perioder er konfigurert.",
|
||||||
"usage_tips": "UI kan vise 0,75 t mens `period_duration_minutes` = 45 for automasjoner."
|
"usage_tips": "Alltid nyttig for planlegging: 'Neste billigperiode starter om 3 timer' (enten du er i en periode nå eller ikke). Kombiner med automatiseringer: 'Når neste starttid er om 10 minutter, send varsel for å forberede vaskemaskin'."
|
||||||
},
|
},
|
||||||
"best_price_next_in_minutes": {
|
"best_price_next_in_minutes": {
|
||||||
"description": "Tid til neste billigperiode",
|
"description": "Minutter til neste billigperiode starter (0 ved overgang)",
|
||||||
"long_description": "Viser hvor lenge til neste billigperiode. State vises i timer (f.eks. 2,25 t) for dashboards, mens attributtet `next_in_minutes` gir minutter (f.eks. 135) for automasjonsbetingelser. Under en aktiv periode viser dette tiden til perioden ETTER den gjeldende. Returnerer 0 under korte overgangsmomenter. Oppdateres hvert minutt.",
|
"long_description": "Viser minutter til neste billigperiode starter. Under en aktiv periode viser dette tiden til perioden ETTER den gjeldende. Returnerer 0 under korte overgangsmomenter. Oppdateres hvert minutt.",
|
||||||
"usage_tips": "For automasjoner: Bruk attributtet `next_in_minutes` som 'Hvis next_in_minutes > 0 OG next_in_minutes < 15, vent før start av oppvaskmaskin'. Verdi > 0 indikerer alltid at en fremtidig periode er planlagt."
|
"usage_tips": "Perfekt for 'vent til billigperiode' automatiseringer: 'Hvis next_in_minutes > 0 OG next_in_minutes < 15, vent før oppvaskmaskin startes'. Verdi > 0 indikerer alltid at en fremtidig periode er planlagt."
|
||||||
},
|
},
|
||||||
"peak_price_end_time": {
|
"peak_price_end_time": {
|
||||||
"description": "Tid til neste dyr-periode (state i timer, attributt i minutter)",
|
"description": "Når gjeldende eller neste dyrperiode slutter",
|
||||||
"long_description": "Viser hvor lenge til neste dyre periode starter. State bruker timer (desimal); attributtet `next_in_minutes` beholder avrundede minutter for automasjoner. Under aktiv periode viser dette tiden til perioden etter den nåværende. 0 i korte overgangsøyeblikk. Oppdateres hvert minutt.",
|
"long_description": "Viser sluttidspunktet for gjeldende dyrperiode når aktiv, eller slutten av neste periode når ingen periode er aktiv. Viser alltid en nyttig tidsreferanse for planlegging. Returnerer 'Ukjent' bare når ingen perioder er konfigurert.",
|
||||||
"usage_tips": "Bruk `next_in_minutes` i automasjoner (f.eks. < 10) mens state er lett å lese i timer."
|
"usage_tips": "Bruk dette til å vise 'Dyrperiode slutter om 1 time' (når aktiv) eller 'Neste dyrperiode slutter kl 18:00' (når inaktiv). Kombiner med automatiseringer for å gjenoppta drift etter topp."
|
||||||
},
|
|
||||||
"peak_price_period_duration": {
|
|
||||||
"description": "Lengde på gjeldende/neste dyr periode",
|
|
||||||
"long_description": "Total varighet av gjeldende eller neste dyre periode. State vises i timer (f.eks. 1,5 t) for enkel lesing i UI, mens attributtet `period_duration_minutes` gir samme verdi i minutter (f.eks. 90) for automasjoner. Denne verdien representerer den **fulle planlagte varigheten** av perioden og er konstant gjennom hele perioden, selv om gjenværende tid (remaining_minutes) reduseres.",
|
|
||||||
"usage_tips": "Kombiner med remaining_minutes for å beregne når langvarige enheter skal stoppes: Perioden startet for `period_duration_minutes - remaining_minutes` minutter siden. Dette attributtet støtter energisparingsstrategier ved å hjelpe til med å planlegge høyforbruksaktiviteter utenfor dyre perioder."
|
|
||||||
},
|
},
|
||||||
"peak_price_remaining_minutes": {
|
"peak_price_remaining_minutes": {
|
||||||
"description": "Gjenværende tid i gjeldende dyre periode",
|
"description": "Gjenværende minutter i gjeldende dyrperiode (0 når inaktiv)",
|
||||||
"long_description": "Viser hvor mye tid som gjenstår i gjeldende dyre periode. State vises i timer (f.eks. 0,75 t) for enkel lesing i dashboards, mens attributtet `remaining_minutes` gir samme tid i minutter (f.eks. 45) for automasjonsbetingelser. **Nedtellingstimer**: Denne verdien reduseres hvert minutt under en aktiv periode. Returnerer 0 når ingen dyr periode er aktiv. Oppdateres hvert minutt.",
|
"long_description": "Viser hvor mange minutter som er igjen i gjeldende dyrperiode. Returnerer 0 når ingen periode er aktiv. Oppdateres hvert minutt. Sjekk binary_sensor.peak_price_period for å se om en periode er aktiv.",
|
||||||
"usage_tips": "For automasjoner: Bruk attributtet `remaining_minutes` som 'Hvis remaining_minutes > 60, avbryt utsatt ladeøkt' eller 'Hvis remaining_minutes < 15, fortsett normal drift snart'. UI viser brukervennlige timer (f.eks. 1,0 t). Verdi 0 indikerer ingen aktiv dyr periode."
|
"usage_tips": "Bruk i automatiseringer: 'Hvis remaining_minutes > 60, avbryt utsatt ladeøkt'. Verdi 0 gjør det enkelt å skille mellom aktive (verdi > 0) og inaktive (verdi = 0) perioder."
|
||||||
},
|
},
|
||||||
"peak_price_progress": {
|
"peak_price_progress": {
|
||||||
"description": "Fremdrift gjennom gjeldende dyrperiode (0% når inaktiv)",
|
"description": "Fremdrift gjennom gjeldende dyrperiode (0% når inaktiv)",
|
||||||
|
|
@ -372,9 +349,19 @@
|
||||||
"usage_tips": "Alltid nyttig for planlegging: 'Neste dyrperiode starter om 2 timer'. Automatisering: 'Når neste starttid er om 30 minutter, reduser varmetemperatur forebyggende'."
|
"usage_tips": "Alltid nyttig for planlegging: 'Neste dyrperiode starter om 2 timer'. Automatisering: 'Når neste starttid er om 30 minutter, reduser varmetemperatur forebyggende'."
|
||||||
},
|
},
|
||||||
"peak_price_next_in_minutes": {
|
"peak_price_next_in_minutes": {
|
||||||
"description": "Tid til neste dyre periode",
|
"description": "Minutter til neste dyrperiode starter (0 ved overgang)",
|
||||||
"long_description": "Viser hvor lenge til neste dyre periode starter. State vises i timer (f.eks. 0,5 t) for dashboards, mens attributtet `next_in_minutes` gir minutter (f.eks. 30) for automasjonsbetingelser. Under en aktiv periode viser dette tiden til perioden ETTER den gjeldende. Returnerer 0 under korte overgangsmomenter. Oppdateres hvert minutt.",
|
"long_description": "Viser minutter til neste dyrperiode starter. Under en aktiv periode viser dette tiden til perioden ETTER den gjeldende. Returnerer 0 under korte overgangsmomenter. Oppdateres hvert minutt.",
|
||||||
"usage_tips": "For automasjoner: Bruk attributtet `next_in_minutes` som 'Hvis next_in_minutes > 0 OG next_in_minutes < 10, fullfør gjeldende ladesyklus nå før prisene øker'. Verdi > 0 indikerer alltid at en fremtidig dyr periode er planlagt."
|
"usage_tips": "Forebyggende automatisering: 'Hvis next_in_minutes > 0 OG next_in_minutes < 10, fullfør gjeldende ladesyklus nå før prisene øker'."
|
||||||
|
},
|
||||||
|
"best_price_period_duration": {
|
||||||
|
"description": "Total varighet av gjeldende eller neste billigperiode i minutter",
|
||||||
|
"long_description": "Viser den totale varigheten av billigperioden i minutter. Under en aktiv periode viser dette hele varigheten av gjeldende periode. Når ingen periode er aktiv, viser dette varigheten av neste kommende periode. Eksempel: '90 minutter' for en 1,5-timers periode.",
|
||||||
|
"usage_tips": "Kombiner med remaining_minutes for å planlegge oppgaver: 'Hvis duration = 120 OG remaining_minutes > 90, start vaskemaskin (nok tid til å fullføre)'. Nyttig for å forstå om perioder er lange nok for strømkrevende oppgaver."
|
||||||
|
},
|
||||||
|
"peak_price_period_duration": {
|
||||||
|
"description": "Total varighet av gjeldende eller neste dyrperiode i minutter",
|
||||||
|
"long_description": "Viser den totale varigheten av dyrperioden i minutter. Under en aktiv periode viser dette hele varigheten av gjeldende periode. Når ingen periode er aktiv, viser dette varigheten av neste kommende periode. Eksempel: '60 minutter' for en 1-times periode.",
|
||||||
|
"usage_tips": "Bruk til å planlegge energibesparelsestiltak: 'Hvis duration > 120, reduser varmetemperatur mer aggressivt (lang dyr periode)'. Hjelper med å vurdere hvor mye energiforbruk må reduseres."
|
||||||
},
|
},
|
||||||
"home_type": {
|
"home_type": {
|
||||||
"description": "Type bolig (leilighet, hus osv.)",
|
"description": "Type bolig (leilighet, hus osv.)",
|
||||||
|
|
@ -450,11 +437,6 @@
|
||||||
"description": "Dataeksport for dashboardintegrasjoner",
|
"description": "Dataeksport for dashboardintegrasjoner",
|
||||||
"long_description": "Denne sensoren kaller get_chartdata-tjenesten med din konfigurerte YAML-konfigurasjon og eksponerer resultatet som entitetsattributter. Status viser 'ready' når data er tilgjengelig, 'error' ved feil, eller 'pending' før første kall. Perfekt for dashboardintegrasjoner som ApexCharts som trenger å lese prisdata fra entitetsattributter.",
|
"long_description": "Denne sensoren kaller get_chartdata-tjenesten med din konfigurerte YAML-konfigurasjon og eksponerer resultatet som entitetsattributter. Status viser 'ready' når data er tilgjengelig, 'error' ved feil, eller 'pending' før første kall. Perfekt for dashboardintegrasjoner som ApexCharts som trenger å lese prisdata fra entitetsattributter.",
|
||||||
"usage_tips": "Konfigurer YAML-parametrene i integrasjonsinnstillingene for å matche get_chartdata-tjenestekallet ditt. Sensoren vil automatisk oppdatere når prisdata oppdateres (typisk etter midnatt og når morgendagens data ankommer). Få tilgang til tjenesteresponsdataene direkte fra entitetens attributter - strukturen matcher nøyaktig det get_chartdata returnerer."
|
"usage_tips": "Konfigurer YAML-parametrene i integrasjonsinnstillingene for å matche get_chartdata-tjenestekallet ditt. Sensoren vil automatisk oppdatere når prisdata oppdateres (typisk etter midnatt og når morgendagens data ankommer). Få tilgang til tjenesteresponsdataene direkte fra entitetens attributter - strukturen matcher nøyaktig det get_chartdata returnerer."
|
||||||
},
|
|
||||||
"chart_metadata": {
|
|
||||||
"description": "Lettvekts metadata for diagramkonfigurasjon",
|
|
||||||
"long_description": "Gir essensielle diagramkonfigurasjonsverdier som sensorattributter. Nyttig for ethvert diagramkort som trenger Y-aksegrenser. Sensoren kaller get_chartdata med kun-metadata-modus (ingen databehandling) og trekker ut: yaxis_min, yaxis_max (foreslått Y-akseområde for optimal skalering). Status reflekterer tjenestekallresultatet: 'ready' ved suksess, 'error' ved feil, 'pending' under initialisering.",
|
|
||||||
"usage_tips": "Konfigurer via configuration.yaml under tibber_prices.chart_metadata_config (valgfritt: day, subunit_currency, resolution). Sensoren oppdateres automatisk når prisdata endres. Få tilgang til metadata fra attributter: yaxis_min, yaxis_max. Bruk med config-template-card eller ethvert verktøy som leser entitetsattributter - perfekt for dynamisk diagramkonfigurasjon uten manuelle beregninger."
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"binary_sensor": {
|
"binary_sensor": {
|
||||||
|
|
@ -487,80 +469,11 @@
|
||||||
"description": "Om sanntidsforbruksovervåking er aktiv",
|
"description": "Om sanntidsforbruksovervåking er aktiv",
|
||||||
"long_description": "Indikerer om sanntidsovervåking av strømforbruk er aktivert og aktiv for ditt Tibber-hjem. Dette krever kompatibel målehardware (f.eks. Tibber Pulse) og et aktivt abonnement.",
|
"long_description": "Indikerer om sanntidsovervåking av strømforbruk er aktivert og aktiv for ditt Tibber-hjem. Dette krever kompatibel målehardware (f.eks. Tibber Pulse) og et aktivt abonnement.",
|
||||||
"usage_tips": "Bruk dette for å bekrefte at sanntidsforbruksdata er tilgjengelig. Aktiver varsler hvis dette endres til 'av' uventet, noe som indikerer potensielle maskinvare- eller tilkoblingsproblemer."
|
"usage_tips": "Bruk dette for å bekrefte at sanntidsforbruksdata er tilgjengelig. Aktiver varsler hvis dette endres til 'av' uventet, noe som indikerer potensielle maskinvare- eller tilkoblingsproblemer."
|
||||||
}
|
|
||||||
},
|
|
||||||
"number": {
|
|
||||||
"best_price_flex_override": {
|
|
||||||
"description": "Maksimal prosent over daglig minimumspris som intervaller kan ha og fortsatt kvalifisere som 'beste pris'. Anbefalt: 15-20 med lemping aktivert (standard), eller 25-35 uten lemping. Maksimum: 50 (tak for pålitelig periodedeteksjon).",
|
|
||||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Fleksibilitet'-innstillingen fra alternativer-dialogen for beste pris-periodeberegninger.",
|
|
||||||
"usage_tips": "Aktiver denne entiteten for å dynamisk justere beste pris-deteksjon via automatiseringer, f.eks. høyere fleksibilitet for kritiske laster eller strengere krav for fleksible apparater."
|
|
||||||
},
|
},
|
||||||
"best_price_min_distance_override": {
|
"chart_data_export": {
|
||||||
"description": "Minimum prosentavstand under daglig gjennomsnitt. Intervaller må være så langt under gjennomsnittet for å kvalifisere som 'beste pris'. Hjelper med å skille ekte lavprisperioder fra gjennomsnittspriser.",
|
"description": "Dataeksport for dashboardintegrasjoner",
|
||||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Minimumsavstand'-innstillingen fra alternativer-dialogen for beste pris-periodeberegninger.",
|
"long_description": "Denne binærsensoren kaller get_chartdata-tjenesten for å eksportere prisdata i formater som er kompatible med ApexCharts og andre dashboardverktøy. Dataeksporten inkluderer historiske og fremtidsrettede prisdata strukturert for visualisering.",
|
||||||
"usage_tips": "Øk verdien for strengere beste pris-kriterier. Reduser hvis for få perioder blir oppdaget."
|
"usage_tips": "Konfigurer YAML-parametrene i integrasjonsalternativene. Bruk denne sensoren til å trigge dataeksporthendelser for dashboards. Når den slås på, eksporteres data til en fil eller tjeneste som er konfigurert for integrering med ApexCharts eller tilsvarende visualiseringsverktøy."
|
||||||
},
|
|
||||||
"best_price_min_period_length_override": {
|
|
||||||
"description": "Minimum periodelengde i 15-minutters intervaller. Perioder kortere enn dette blir ikke rapportert. Eksempel: 2 = minimum 30 minutter.",
|
|
||||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Minimum periodelengde'-innstillingen fra alternativer-dialogen for beste pris-periodeberegninger.",
|
|
||||||
"usage_tips": "Juster til typisk apparatkjøretid: 2 (30 min) for hurtigprogrammer, 4-8 (1-2 timer) for normale sykluser, 8+ for lange ECO-programmer."
|
|
||||||
},
|
|
||||||
"best_price_min_periods_override": {
|
|
||||||
"description": "Minimum antall beste pris-perioder å finne daglig. Når lemping er aktivert, vil systemet automatisk justere kriterier for å oppnå dette antallet.",
|
|
||||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Minimum perioder'-innstillingen fra alternativer-dialogen for beste pris-periodeberegninger.",
|
|
||||||
"usage_tips": "Sett dette til antall tidskritiske oppgaver du har daglig. Eksempel: 2 for to vaskemaskinkjøringer."
|
|
||||||
},
|
|
||||||
"best_price_relaxation_attempts_override": {
|
|
||||||
"description": "Antall forsøk på å gradvis lempe kriteriene for å oppnå minimum periodeantall. Hvert forsøk øker fleksibiliteten med 3 prosent. Ved 0 brukes kun basiskriterier.",
|
|
||||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Lemping forsøk'-innstillingen fra alternativer-dialogen for beste pris-periodeberegninger.",
|
|
||||||
"usage_tips": "Høyere verdier gjør periodedeteksjon mer adaptiv for dager med stabile priser. Sett til 0 for å tvinge strenge kriterier uten lemping."
|
|
||||||
},
|
|
||||||
"best_price_gap_count_override": {
|
|
||||||
"description": "Maksimalt antall dyrere intervaller som kan tillates mellom billige intervaller mens de fortsatt regnes som en sammenhengende periode. Ved 0 må billige intervaller være påfølgende.",
|
|
||||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Gaptoleranse'-innstillingen fra alternativer-dialogen for beste pris-periodeberegninger.",
|
|
||||||
"usage_tips": "Øk dette for apparater med variabel last (f.eks. varmepumper) som kan tåle korte dyrere intervaller. Sett til 0 for kontinuerlige billige perioder."
|
|
||||||
},
|
|
||||||
"peak_price_flex_override": {
|
|
||||||
"description": "Maksimal prosent under daglig maksimumspris som intervaller kan ha og fortsatt kvalifisere som 'topppris'. Samme anbefalinger som for beste pris-fleksibilitet.",
|
|
||||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Fleksibilitet'-innstillingen fra alternativer-dialogen for topppris-periodeberegninger.",
|
|
||||||
"usage_tips": "Bruk dette for å justere topppris-terskelen ved kjøretid for automatiseringer som unngår forbruk under dyre timer."
|
|
||||||
},
|
|
||||||
"peak_price_min_distance_override": {
|
|
||||||
"description": "Minimum prosentavstand over daglig gjennomsnitt. Intervaller må være så langt over gjennomsnittet for å kvalifisere som 'topppris'.",
|
|
||||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Minimumsavstand'-innstillingen fra alternativer-dialogen for topppris-periodeberegninger.",
|
|
||||||
"usage_tips": "Øk verdien for kun å fange ekstreme pristopper. Reduser for å inkludere flere høypristider."
|
|
||||||
},
|
|
||||||
"peak_price_min_period_length_override": {
|
|
||||||
"description": "Minimum periodelengde i 15-minutters intervaller for topppriser. Kortere pristopper rapporteres ikke som perioder.",
|
|
||||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Minimum periodelengde'-innstillingen fra alternativer-dialogen for topppris-periodeberegninger.",
|
|
||||||
"usage_tips": "Kortere verdier fanger korte pristopper. Lengre verdier fokuserer på vedvarende høyprisperioder."
|
|
||||||
},
|
|
||||||
"peak_price_min_periods_override": {
|
|
||||||
"description": "Minimum antall topppris-perioder å finne daglig.",
|
|
||||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Minimum perioder'-innstillingen fra alternativer-dialogen for topppris-periodeberegninger.",
|
|
||||||
"usage_tips": "Sett dette basert på hvor mange høyprisperioder du vil fange per dag for automatiseringer."
|
|
||||||
},
|
|
||||||
"peak_price_relaxation_attempts_override": {
|
|
||||||
"description": "Antall forsøk på å lempe kriteriene for å oppnå minimum antall topppris-perioder.",
|
|
||||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Lemping forsøk'-innstillingen fra alternativer-dialogen for topppris-periodeberegninger.",
|
|
||||||
"usage_tips": "Øk dette hvis ingen perioder blir funnet på dager med stabile priser. Sett til 0 for å tvinge strenge kriterier."
|
|
||||||
},
|
|
||||||
"peak_price_gap_count_override": {
|
|
||||||
"description": "Maksimalt antall billigere intervaller som kan tillates mellom dyre intervaller mens de fortsatt regnes som en topppris-periode.",
|
|
||||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Gaptoleranse'-innstillingen fra alternativer-dialogen for topppris-periodeberegninger.",
|
|
||||||
"usage_tips": "Høyere verdier fanger lengre høyprisperioder selv med korte prisdykk. Sett til 0 for strengt sammenhengende topppriser."
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"switch": {
|
|
||||||
"best_price_enable_relaxation_override": {
|
|
||||||
"description": "Når aktivert, lempes kriteriene automatisk for å oppnå minimum periodeantall. Når deaktivert, rapporteres kun perioder som oppfyller strenge kriterier (muligens null perioder på dager med stabile priser).",
|
|
||||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Oppnå minimumsantall'-innstillingen fra alternativer-dialogen for beste pris-periodeberegninger.",
|
|
||||||
"usage_tips": "Aktiver dette for garanterte daglige automatiseringsmuligheter. Deaktiver hvis du kun vil ha virkelig billige perioder, selv om det betyr ingen perioder på noen dager."
|
|
||||||
},
|
|
||||||
"peak_price_enable_relaxation_override": {
|
|
||||||
"description": "Når aktivert, lempes kriteriene automatisk for å oppnå minimum periodeantall. Når deaktivert, rapporteres kun ekte pristopper.",
|
|
||||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Oppnå minimumsantall'-innstillingen fra alternativer-dialogen for topppris-periodeberegninger.",
|
|
||||||
"usage_tips": "Aktiver dette for konsistente topppris-varsler. Deaktiver for kun å fange ekstreme pristopper."
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"home_types": {
|
"home_types": {
|
||||||
|
|
|
||||||
|
|
@ -1,40 +1,27 @@
|
||||||
{
|
{
|
||||||
"apexcharts": {
|
"apexcharts": {
|
||||||
"title_rating_level": "Prijsfasen dagverloop",
|
"title_rating_level": "Prijsfasen dagelijkse voortgang",
|
||||||
"title_level": "Prijsniveau",
|
"title_level": "Prijsniveau"
|
||||||
"hourly_suffix": "(Ø per uur)",
|
|
||||||
"best_price_period_name": "Beste prijsperiode",
|
|
||||||
"peak_price_period_name": "Piekprijsperiode",
|
|
||||||
"notification": {
|
|
||||||
"metadata_sensor_unavailable": {
|
|
||||||
"title": "Tibber Prices: ApexCharts YAML gegenereerd met beperkte functionaliteit",
|
|
||||||
"message": "Je hebt zojuist een ApexCharts-kaartconfiguratie gegenereerd via Ontwikkelaarstools. De grafiek-metadata-sensor is momenteel uitgeschakeld, dus de gegenereerde YAML toont alleen **basisfunctionaliteit** (auto-schaal as, vaste verloop op 50%).\n\n**Voor volledige functionaliteit** (geoptimaliseerde schaling, dynamische verloopkleuren):\n1. [Open Tibber Prices-integratie](https://my.home-assistant.io/redirect/integration/?domain=tibber_prices)\n2. Schakel de 'Chart Metadata'-sensor in\n3. **Genereer de YAML opnieuw** via Ontwikkelaarstools\n4. **Vervang de oude YAML** in je dashboard door de nieuwe versie\n\n⚠️ Alleen de sensor inschakelen is niet genoeg - je moet de YAML opnieuw genereren en vervangen!"
|
|
||||||
},
|
|
||||||
"missing_cards": {
|
|
||||||
"title": "Tibber Prices: ApexCharts YAML kan niet worden gebruikt",
|
|
||||||
"message": "Je hebt zojuist een ApexCharts-kaartconfiguratie gegenereerd via Ontwikkelaarstools, maar de gegenereerde YAML **zal niet werken** omdat vereiste aangepaste kaarten ontbreken.\n\n**Ontbrekende kaarten:**\n{cards}\n\n**Om de gegenereerde YAML te gebruiken:**\n1. Klik op de bovenstaande links om de ontbrekende kaarten te installeren vanuit HACS\n2. Herstart Home Assistant (soms nodig)\n3. **Genereer de YAML opnieuw** via Ontwikkelaarstools\n4. Voeg de YAML toe aan je dashboard\n\n⚠️ De huidige YAML-code werkt niet totdat alle kaarten zijn geïnstalleerd!"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
"sensor": {
|
"sensor": {
|
||||||
"current_interval_price": {
|
"current_interval_price": {
|
||||||
"description": "De huidige elektriciteitsprijs per kWh",
|
"description": "De huidige elektriciteitsprijs per kWh",
|
||||||
"long_description": "Toont de huidige prijs per kWh van je Tibber-abonnement",
|
"long_description": "Toont de huidige prijs per kWh van uw Tibber-abonnement",
|
||||||
"usage_tips": "Gebruik dit om prijzen bij te houden of om automatiseringen te maken die worden uitgevoerd wanneer elektriciteit goedkoop is"
|
"usage_tips": "Gebruik dit om prijzen bij te houden of om automatiseringen te maken die worden uitgevoerd wanneer elektriciteit goedkoop is"
|
||||||
},
|
},
|
||||||
"current_interval_price_base": {
|
"current_interval_price_major": {
|
||||||
"description": "Huidige elektriciteitsprijs in hoofdvaluta (EUR/kWh, NOK/kWh, enz.) voor Energie-dashboard",
|
"description": "Huidige elektriciteitsprijs in hoofdvaluta (EUR/kWh, NOK/kWh, enz.) voor Energie-dashboard",
|
||||||
"long_description": "Toont de huidige prijs per kWh in hoofdvaluta-eenheden (bijv. EUR/kWh in plaats van ct/kWh, NOK/kWh in plaats van øre/kWh). Deze sensor is speciaal ontworpen voor gebruik met het Energie-dashboard van Home Assistant, dat prijzen in standaard valuta-eenheden vereist.",
|
"long_description": "Toont de huidige prijs per kWh in hoofdvaluta-eenheden (bijv. EUR/kWh in plaats van ct/kWh, NOK/kWh in plaats van øre/kWh). Deze sensor is speciaal ontworpen voor gebruik met het Energie-dashboard van Home Assistant, dat prijzen in standaard valuta-eenheden vereist.",
|
||||||
"usage_tips": "Gebruik deze sensor bij het configureren van het Energie-dashboard onder Instellingen → Dashboards → Energie. Selecteer deze sensor als 'Entiteit met huidige prijs' om automatisch je energiekosten te berekenen. Het Energie-dashboard vermenigvuldigt je energieverbruik (kWh) met deze prijs om totale kosten weer te geven."
|
"usage_tips": "Gebruik deze sensor bij het configureren van het Energie-dashboard onder Instellingen → Dashboards → Energie. Selecteer deze sensor als 'Entiteit met huidige prijs' om automatisch je energiekosten te berekenen. Het Energie-dashboard vermenigvuldigt je energieverbruik (kWh) met deze prijs om totale kosten weer te geven."
|
||||||
},
|
},
|
||||||
"next_interval_price": {
|
"next_interval_price": {
|
||||||
"description": "De volgende interval elektriciteitsprijs per kWh",
|
"description": "De volgende interval elektriciteitsprijs per kWh",
|
||||||
"long_description": "Toont de prijs voor het volgende 15-minuten interval van je Tibber-abonnement",
|
"long_description": "Toont de prijs voor het volgende 15-minuten interval van uw Tibber-abonnement",
|
||||||
"usage_tips": "Gebruik dit om je voor te bereiden op aanstaande prijswijzigingen of om apparaten te plannen om tijdens goedkopere intervallen te draaien"
|
"usage_tips": "Gebruik dit om u voor te bereiden op aanstaande prijswijzigingen of om apparaten te plannen om tijdens goedkopere intervallen te draaien"
|
||||||
},
|
},
|
||||||
"previous_interval_price": {
|
"previous_interval_price": {
|
||||||
"description": "De vorige interval elektriciteitsprijs per kWh",
|
"description": "De vorige interval elektriciteitsprijs per kWh",
|
||||||
"long_description": "Toont de prijs voor het vorige 15-minuten interval van je Tibber-abonnement",
|
"long_description": "Toont de prijs voor het vorige 15-minuten interval van uw Tibber-abonnement",
|
||||||
"usage_tips": "Gebruik dit om eerdere prijswijzigingen te bekijken of prijsgeschiedenis bij te houden"
|
"usage_tips": "Gebruik dit om eerdere prijswijzigingen te bekijken of prijsgeschiedenis bij te houden"
|
||||||
},
|
},
|
||||||
"current_hour_average_price": {
|
"current_hour_average_price": {
|
||||||
|
|
@ -49,33 +36,33 @@
|
||||||
},
|
},
|
||||||
"lowest_price_today": {
|
"lowest_price_today": {
|
||||||
"description": "De laagste elektriciteitsprijs voor vandaag per kWh",
|
"description": "De laagste elektriciteitsprijs voor vandaag per kWh",
|
||||||
"long_description": "Toont de laagste prijs per kWh voor de huidige dag van je Tibber-abonnement",
|
"long_description": "Toont de laagste prijs per kWh voor de huidige dag van uw Tibber-abonnement",
|
||||||
"usage_tips": "Gebruik dit om huidige prijzen te vergelijken met de goedkoopste tijd van de dag"
|
"usage_tips": "Gebruik dit om huidige prijzen te vergelijken met de goedkoopste tijd van de dag"
|
||||||
},
|
},
|
||||||
"highest_price_today": {
|
"highest_price_today": {
|
||||||
"description": "De hoogste elektriciteitsprijs voor vandaag per kWh",
|
"description": "De hoogste elektriciteitsprijs voor vandaag per kWh",
|
||||||
"long_description": "Toont de hoogste prijs per kWh voor de huidige dag van je Tibber-abonnement",
|
"long_description": "Toont de hoogste prijs per kWh voor de huidige dag van uw Tibber-abonnement",
|
||||||
"usage_tips": "Gebruik dit om te voorkomen dat je apparaten draait tijdens piekprijstijden"
|
"usage_tips": "Gebruik dit om te voorkomen dat u apparaten draait tijdens piekprijstijden"
|
||||||
},
|
},
|
||||||
"average_price_today": {
|
"average_price_today": {
|
||||||
"description": "Typische elektriciteitsprijs voor vandaag per kWh (configureerbare weergave)",
|
"description": "De gemiddelde elektriciteitsprijs voor vandaag per kWh",
|
||||||
"long_description": "Toont de prijs per kWh voor de huidige dag van je Tibber-abonnement. **Standaard toont de status de mediaan** (resistent tegen extreme prijspieken, toont typisch prijsniveau). Je kunt dit wijzigen in de integratie-instellingen om het rekenkundig gemiddelde te tonen. De alternatieve waarde is beschikbaar als attribuut.",
|
"long_description": "Toont de gemiddelde prijs per kWh voor de huidige dag van uw Tibber-abonnement",
|
||||||
"usage_tips": "Gebruik dit als basislijn voor het vergelijken van huidige prijzen. Voor berekeningen gebruik: {{ state_attr('sensor.average_price_today', 'price_mean') }}"
|
"usage_tips": "Gebruik dit als basislijn voor het vergelijken van huidige prijzen"
|
||||||
},
|
},
|
||||||
"lowest_price_tomorrow": {
|
"lowest_price_tomorrow": {
|
||||||
"description": "De laagste elektriciteitsprijs voor morgen per kWh",
|
"description": "De laagste elektriciteitsprijs voor morgen per kWh",
|
||||||
"long_description": "Toont de laagste prijs per kWh voor morgen van je Tibber-abonnement. Deze sensor wordt niet beschikbaar totdat de gegevens van morgen door Tibber worden gepubliceerd (meestal rond 13:00-14:00 CET).",
|
"long_description": "Toont de laagste prijs per kWh voor morgen van uw Tibber-abonnement. Deze sensor wordt niet beschikbaar totdat de gegevens van morgen door Tibber worden gepubliceerd (meestal rond 13:00-14:00 CET).",
|
||||||
"usage_tips": "Gebruik dit om energie-intensieve activiteiten te plannen voor de goedkoopste tijd van morgen. Perfect voor vooraf plannen van verwarming, EV-laden of apparaten."
|
"usage_tips": "Gebruik dit om energie-intensieve activiteiten te plannen voor de goedkoopste tijd van morgen. Perfect voor vooraf plannen van verwarming, EV-laden of apparaten."
|
||||||
},
|
},
|
||||||
"highest_price_tomorrow": {
|
"highest_price_tomorrow": {
|
||||||
"description": "De hoogste elektriciteitsprijs voor morgen per kWh",
|
"description": "De hoogste elektriciteitsprijs voor morgen per kWh",
|
||||||
"long_description": "Toont de hoogste prijs per kWh voor morgen van je Tibber-abonnement. Deze sensor wordt niet beschikbaar totdat de gegevens van morgen door Tibber worden gepubliceerd (meestal rond 13:00-14:00 CET).",
|
"long_description": "Toont de hoogste prijs per kWh voor morgen van uw Tibber-abonnement. Deze sensor wordt niet beschikbaar totdat de gegevens van morgen door Tibber worden gepubliceerd (meestal rond 13:00-14:00 CET).",
|
||||||
"usage_tips": "Gebruik dit om te voorkomen dat je apparaten draait tijdens de piekprijstijden van morgen. Handig voor het plannen rond dure perioden."
|
"usage_tips": "Gebruik dit om te voorkomen dat u apparaten draait tijdens de piekprijstijden van morgen. Handig voor het plannen rond dure perioden."
|
||||||
},
|
},
|
||||||
"average_price_tomorrow": {
|
"average_price_tomorrow": {
|
||||||
"description": "Typische elektriciteitsprijs voor morgen per kWh (configureerbare weergave)",
|
"description": "De gemiddelde elektriciteitsprijs voor morgen per kWh",
|
||||||
"long_description": "Toont de prijs per kWh voor morgen van je Tibber-abonnement. **Standaard toont de status de mediaan** (resistent tegen extreme prijspieken). Je kunt dit wijzigen in de integratie-instellingen om het rekenkundig gemiddelde te tonen. De alternatieve waarde is beschikbaar als attribuut. Deze sensor wordt niet beschikbaar totdat de gegevens van morgen door Tibber worden gepubliceerd (meestal rond 13:00-14:00 CET).",
|
"long_description": "Toont de gemiddelde prijs per kWh voor morgen van uw Tibber-abonnement. Deze sensor wordt niet beschikbaar totdat de gegevens van morgen door Tibber worden gepubliceerd (meestal rond 13:00-14:00 CET).",
|
||||||
"usage_tips": "Gebruik dit als basislijn voor het vergelijken van prijzen van morgen en het plannen van verbruik. Vergelijk met de mediaan van vandaag om te zien of morgen over het algemeen duurder of goedkoper wordt."
|
"usage_tips": "Gebruik dit als basislijn voor het vergelijken van prijzen van morgen en het plannen van verbruik. Vergelijk met het gemiddelde van vandaag om te zien of morgen over het algemeen duurder of goedkoper wordt."
|
||||||
},
|
},
|
||||||
"yesterday_price_level": {
|
"yesterday_price_level": {
|
||||||
"description": "Geaggregeerd prijsniveau voor gisteren",
|
"description": "Geaggregeerd prijsniveau voor gisteren",
|
||||||
|
|
@ -94,48 +81,48 @@
|
||||||
},
|
},
|
||||||
"yesterday_price_rating": {
|
"yesterday_price_rating": {
|
||||||
"description": "Geaggregeerde prijsbeoordeling voor gisteren",
|
"description": "Geaggregeerde prijsbeoordeling voor gisteren",
|
||||||
"long_description": "Toont de geaggregeerde prijsbeoordeling (laag/normaal/hoog) voor alle intervallen van gisteren, gebaseerd op jouw geconfigureerde drempelwaarden. Gebruikt dezelfde logica als de uursensoren om de totale beoordeling voor de hele dag te bepalen.",
|
"long_description": "Toont de geaggregeerde prijsbeoordeling (laag/normaal/hoog) voor alle intervallen van gisteren, gebaseerd op uw geconfigureerde drempelwaarden. Gebruikt dezelfde logica als de uursensoren om de totale beoordeling voor de hele dag te bepalen.",
|
||||||
"usage_tips": "Gebruik dit om de prijssituatie van gisteren te begrijpen ten opzichte van jouw persoonlijke drempelwaarden. Vergelijk met vandaag voor trendanalyse."
|
"usage_tips": "Gebruik dit om de prijssituatie van gisteren te begrijpen ten opzichte van uw persoonlijke drempelwaarden. Vergelijk met vandaag voor trendanalyse."
|
||||||
},
|
},
|
||||||
"today_price_rating": {
|
"today_price_rating": {
|
||||||
"description": "Geaggregeerde prijsbeoordeling voor vandaag",
|
"description": "Geaggregeerde prijsbeoordeling voor vandaag",
|
||||||
"long_description": "Toont de geaggregeerde prijsbeoordeling (laag/normaal/hoog) voor alle intervallen van vandaag, gebaseerd op jouw geconfigureerde drempelwaarden. Gebruikt dezelfde logica als de uursensoren om de totale beoordeling voor de hele dag te bepalen.",
|
"long_description": "Toont de geaggregeerde prijsbeoordeling (laag/normaal/hoog) voor alle intervallen van vandaag, gebaseerd op uw geconfigureerde drempelwaarden. Gebruikt dezelfde logica als de uursensoren om de totale beoordeling voor de hele dag te bepalen.",
|
||||||
"usage_tips": "Gebruik dit om snel de prijssituatie van vandaag te beoordelen ten opzichte van jouw persoonlijke drempelwaarden. Helpt bij het nemen van verbruiksbeslissingen voor de huidige dag."
|
"usage_tips": "Gebruik dit om snel de prijssituatie van vandaag te beoordelen ten opzichte van uw persoonlijke drempelwaarden. Helpt bij het nemen van verbruiksbeslissingen voor de huidige dag."
|
||||||
},
|
},
|
||||||
"tomorrow_price_rating": {
|
"tomorrow_price_rating": {
|
||||||
"description": "Geaggregeerde prijsbeoordeling voor morgen",
|
"description": "Geaggregeerde prijsbeoordeling voor morgen",
|
||||||
"long_description": "Toont de geaggregeerde prijsbeoordeling (laag/normaal/hoog) voor alle intervallen van morgen, gebaseerd op jouw geconfigureerde drempelwaarden. Gebruikt dezelfde logica als de uursensoren om de totale beoordeling voor de hele dag te bepalen. Deze sensor wordt niet beschikbaar totdat de gegevens van morgen door Tibber worden gepubliceerd (meestal rond 13:00-14:00 CET).",
|
"long_description": "Toont de geaggregeerde prijsbeoordeling (laag/normaal/hoog) voor alle intervallen van morgen, gebaseerd op uw geconfigureerde drempelwaarden. Gebruikt dezelfde logica als de uursensoren om de totale beoordeling voor de hele dag te bepalen. Deze sensor wordt niet beschikbaar totdat de gegevens van morgen door Tibber worden gepubliceerd (meestal rond 13:00-14:00 CET).",
|
||||||
"usage_tips": "Gebruik dit om het energieverbruik van morgen te plannen op basis van jouw persoonlijke prijsdrempelwaarden. Vergelijk met vandaag om te beslissen of je verbruik naar morgen moet verschuiven of vandaag energie moet gebruiken."
|
"usage_tips": "Gebruik dit om het energieverbruik van morgen te plannen op basis van uw persoonlijke prijsdrempelwaarden. Vergelijk met vandaag om te beslissen of u verbruik naar morgen moet verschuiven of vandaag energie moet gebruiken."
|
||||||
},
|
},
|
||||||
"trailing_price_average": {
|
"trailing_price_average": {
|
||||||
"description": "Typische elektriciteitsprijs voor de afgelopen 24 uur per kWh (configureerbare weergave)",
|
"description": "De gemiddelde elektriciteitsprijs voor de afgelopen 24 uur per kWh",
|
||||||
"long_description": "Toont de prijs per kWh berekend uit de afgelopen 24 uur. **Standaard toont de status de mediaan** (resistent tegen extreme prijspieken, toont typisch prijsniveau). Je kunt dit wijzigen in de integratie-instellingen om het rekenkundig gemiddelde te tonen. De alternatieve waarde is beschikbaar als attribuut. Wordt elke 15 minuten bijgewerkt.",
|
"long_description": "Toont de gemiddelde prijs per kWh berekend uit de afgelopen 24 uur (voortschrijdend gemiddelde) van uw Tibber-abonnement. Dit biedt een voortschrijdend gemiddelde dat elke 15 minuten wordt bijgewerkt op basis van historische gegevens.",
|
||||||
"usage_tips": "Gebruik de statuswaarde om het typische huidige prijsniveau te zien. Voor kostenberekeningen gebruik: {{ state_attr('sensor.trailing_price_average', 'price_mean') }}"
|
"usage_tips": "Gebruik dit om huidige prijzen te vergelijken met recente trends. Een huidige prijs die aanzienlijk boven dit gemiddelde ligt, kan aangeven dat het een goed moment is om het verbruik te verminderen."
|
||||||
},
|
},
|
||||||
"leading_price_average": {
|
"leading_price_average": {
|
||||||
"description": "Typische elektriciteitsprijs voor de komende 24 uur per kWh (configureerbare weergave)",
|
"description": "De gemiddelde elektriciteitsprijs voor de komende 24 uur per kWh",
|
||||||
"long_description": "Toont de prijs per kWh berekend uit de komende 24 uur. **Standaard toont de status de mediaan** (resistent tegen extreme prijspieken, toont verwacht prijsniveau). Je kunt dit wijzigen in de integratie-instellingen om het rekenkundig gemiddelde te tonen. De alternatieve waarde is beschikbaar als attribuut.",
|
"long_description": "Toont de gemiddelde prijs per kWh berekend uit de komende 24 uur (vooruitlopend gemiddelde) van uw Tibber-abonnement. Dit biedt een vooruitkijkend gemiddelde op basis van beschikbare prognosegegevens.",
|
||||||
"usage_tips": "Gebruik de statuswaarde om het typische toekomstige prijsniveau te zien. Voor kostenberekeningen gebruik: {{ state_attr('sensor.leading_price_average', 'price_mean') }}"
|
"usage_tips": "Gebruik dit om energieverbruik te plannen. Als de huidige prijs onder het vooruitlopende gemiddelde ligt, kan het een goed moment zijn om energie-intensieve apparaten te laten draaien."
|
||||||
},
|
},
|
||||||
"trailing_price_min": {
|
"trailing_price_min": {
|
||||||
"description": "De minimale elektriciteitsprijs voor de afgelopen 24 uur per kWh",
|
"description": "De minimale elektriciteitsprijs voor de afgelopen 24 uur per kWh",
|
||||||
"long_description": "Toont de minimumprijs per kWh van de afgelopen 24 uur (voortschrijdend minimum) van je Tibber-abonnement. Dit geeft de laagste prijs die in de afgelopen 24 uur is gezien.",
|
"long_description": "Toont de minimumprijs per kWh van de afgelopen 24 uur (voortschrijdend minimum) van uw Tibber-abonnement. Dit geeft de laagste prijs die in de afgelopen 24 uur is gezien.",
|
||||||
"usage_tips": "Gebruik dit om de beste prijsmogelijkheid te zien die je in de afgelopen 24 uur had en vergelijk deze met huidige prijzen."
|
"usage_tips": "Gebruik dit om de beste prijsmogelijkheid te zien die u in de afgelopen 24 uur had en vergelijk deze met huidige prijzen."
|
||||||
},
|
},
|
||||||
"trailing_price_max": {
|
"trailing_price_max": {
|
||||||
"description": "De maximale elektriciteitsprijs voor de afgelopen 24 uur per kWh",
|
"description": "De maximale elektriciteitsprijs voor de afgelopen 24 uur per kWh",
|
||||||
"long_description": "Toont de maximumprijs per kWh van de afgelopen 24 uur (voortschrijdend maximum) van je Tibber-abonnement. Dit geeft de hoogste prijs die in de afgelopen 24 uur is gezien.",
|
"long_description": "Toont de maximumprijs per kWh van de afgelopen 24 uur (voortschrijdend maximum) van uw Tibber-abonnement. Dit geeft de hoogste prijs die in de afgelopen 24 uur is gezien.",
|
||||||
"usage_tips": "Gebruik dit om de piekprijs in de afgelopen 24 uur te zien en prijsvolatiliteit te beoordelen."
|
"usage_tips": "Gebruik dit om de piekprijs in de afgelopen 24 uur te zien en prijsvolatiliteit te beoordelen."
|
||||||
},
|
},
|
||||||
"leading_price_min": {
|
"leading_price_min": {
|
||||||
"description": "De minimale elektriciteitsprijs voor de komende 24 uur per kWh",
|
"description": "De minimale elektriciteitsprijs voor de komende 24 uur per kWh",
|
||||||
"long_description": "Toont de minimumprijs per kWh van de komende 24 uur (vooruitlopend minimum) van je Tibber-abonnement. Dit geeft de laagste prijs die wordt verwacht in de komende 24 uur op basis van prognosegegevens.",
|
"long_description": "Toont de minimumprijs per kWh van de komende 24 uur (vooruitlopend minimum) van uw Tibber-abonnement. Dit geeft de laagste prijs die wordt verwacht in de komende 24 uur op basis van prognosegegevens.",
|
||||||
"usage_tips": "Gebruik dit om de beste prijsmogelijkheid te identificeren die eraan komt en plan energie-intensieve taken dienovereenkomstig."
|
"usage_tips": "Gebruik dit om de beste prijsmogelijkheid te identificeren die eraan komt en plan energie-intensieve taken dienovereenkomstig."
|
||||||
},
|
},
|
||||||
"leading_price_max": {
|
"leading_price_max": {
|
||||||
"description": "De maximale elektriciteitsprijs voor de komende 24 uur per kWh",
|
"description": "De maximale elektriciteitsprijs voor de komende 24 uur per kWh",
|
||||||
"long_description": "Toont de maximumprijs per kWh van de komende 24 uur (vooruitlopend maximum) van je Tibber-abonnement. Dit geeft de hoogste prijs die wordt verwacht in de komende 24 uur op basis van prognosegegevens.",
|
"long_description": "Toont de maximumprijs per kWh van de komende 24 uur (vooruitlopend maximum) van uw Tibber-abonnement. Dit geeft de hoogste prijs die wordt verwacht in de komende 24 uur op basis van prognosegegevens.",
|
||||||
"usage_tips": "Gebruik dit om te voorkomen dat je apparaten draait tijdens aanstaande piekprijsperioden."
|
"usage_tips": "Gebruik dit om te voorkomen dat u apparaten draait tijdens aanstaande piekprijsperioden."
|
||||||
},
|
},
|
||||||
"current_interval_price_level": {
|
"current_interval_price_level": {
|
||||||
"description": "De huidige prijsniveauclassificatie",
|
"description": "De huidige prijsniveauclassificatie",
|
||||||
|
|
@ -155,7 +142,7 @@
|
||||||
"current_hour_price_level": {
|
"current_hour_price_level": {
|
||||||
"description": "Geaggregeerd prijsniveau voor huidig voortschrijdend uur (5 intervallen)",
|
"description": "Geaggregeerd prijsniveau voor huidig voortschrijdend uur (5 intervallen)",
|
||||||
"long_description": "Toont het mediane prijsniveau over 5 intervallen (2 ervoor, huidig, 2 erna) dat ongeveer 75 minuten beslaat. Biedt een stabielere prijsniveauindicator die kortetermijnschommelingen afvlakt.",
|
"long_description": "Toont het mediane prijsniveau over 5 intervallen (2 ervoor, huidig, 2 erna) dat ongeveer 75 minuten beslaat. Biedt een stabielere prijsniveauindicator die kortetermijnschommelingen afvlakt.",
|
||||||
"usage_tips": "Gebruik voor planningsbeslissingen op middellange termijn waarbij je niet wilt reageren op korte prijspieken of -dalingen."
|
"usage_tips": "Gebruik voor planningsbeslissingen op middellange termijn waarbij u niet wilt reageren op korte prijspieken of -dalingen."
|
||||||
},
|
},
|
||||||
"next_hour_price_level": {
|
"next_hour_price_level": {
|
||||||
"description": "Geaggregeerd prijsniveau voor volgend voortschrijdend uur (5 intervallen vooruit)",
|
"description": "Geaggregeerd prijsniveau voor volgend voortschrijdend uur (5 intervallen vooruit)",
|
||||||
|
|
@ -185,22 +172,22 @@
|
||||||
"next_hour_price_rating": {
|
"next_hour_price_rating": {
|
||||||
"description": "Geaggregeerde prijsbeoordeling voor volgend voortschrijdend uur (5 intervallen vooruit)",
|
"description": "Geaggregeerde prijsbeoordeling voor volgend voortschrijdend uur (5 intervallen vooruit)",
|
||||||
"long_description": "Toont de gemiddelde beoordeling voor 5 intervallen gecentreerd één uur vooruit. Helpt te begrijpen of het volgende uur over het algemeen boven of onder gemiddelde prijzen zal liggen.",
|
"long_description": "Toont de gemiddelde beoordeling voor 5 intervallen gecentreerd één uur vooruit. Helpt te begrijpen of het volgende uur over het algemeen boven of onder gemiddelde prijzen zal liggen.",
|
||||||
"usage_tips": "Gebruik om te beslissen of je een uur moet wachten voordat je activiteiten met hoog verbruik start."
|
"usage_tips": "Gebruik om te beslissen of u een uur moet wachten voordat u activiteiten met hoog verbruik start."
|
||||||
},
|
},
|
||||||
"next_avg_1h": {
|
"next_avg_1h": {
|
||||||
"description": "Gemiddelde prijs voor het volgende 1 uur (alleen vooruit vanaf volgend interval)",
|
"description": "Gemiddelde prijs voor het volgende 1 uur (alleen vooruit vanaf volgend interval)",
|
||||||
"long_description": "Vooruitkijkend gemiddelde: Toont gemiddelde van volgende 4 intervallen (1 uur) vanaf het VOLGENDE 15-minuten interval (niet inclusief huidig). Verschilt van current_hour_average_price die vorige intervallen omvat. Gebruik voor absolute prijsdrempelplanning.",
|
"long_description": "Vooruitkijkend gemiddelde: Toont gemiddelde van volgende 4 intervallen (1 uur) vanaf het VOLGENDE 15-minuten interval (niet inclusief huidig). Verschilt van current_hour_average_price die vorige intervallen omvat. Gebruik voor absolute prijsdrempelplanning.",
|
||||||
"usage_tips": "Absolute prijsdrempel: Start apparaten alleen wanneer het gemiddelde onder je maximaal acceptabele prijs blijft (bijv. onder 0,25 EUR/kWh). Combineer met trendsensor voor optimale timing. Let op: Dit is GEEN vervanging voor uurprijzen - gebruik current_hour_average_price daarvoor."
|
"usage_tips": "Absolute prijsdrempel: Start apparaten alleen wanneer het gemiddelde onder uw maximaal acceptabele prijs blijft (bijv. onder 0,25 EUR/kWh). Combineer met trendsensor voor optimale timing. Let op: Dit is GEEN vervanging voor uurprijzen - gebruik current_hour_average_price daarvoor."
|
||||||
},
|
},
|
||||||
"next_avg_2h": {
|
"next_avg_2h": {
|
||||||
"description": "Gemiddelde prijs voor de volgende 2 uur",
|
"description": "Gemiddelde prijs voor de volgende 2 uur",
|
||||||
"long_description": "Toont de gemiddelde prijs voor de volgende 8 intervallen (2 uur) vanaf het volgende 15-minuten interval.",
|
"long_description": "Toont de gemiddelde prijs voor de volgende 8 intervallen (2 uur) vanaf het volgende 15-minuten interval.",
|
||||||
"usage_tips": "Absolute prijsdrempel: Stel een maximaal acceptabele gemiddelde prijs in voor standaard apparaten zoals wasmachines. Zorgt ervoor dat je nooit meer betaalt dan je limiet."
|
"usage_tips": "Absolute prijsdrempel: Stel een maximaal acceptabele gemiddelde prijs in voor standaard apparaten zoals wasmachines. Zorgt ervoor dat u nooit meer betaalt dan uw limiet."
|
||||||
},
|
},
|
||||||
"next_avg_3h": {
|
"next_avg_3h": {
|
||||||
"description": "Gemiddelde prijs voor de volgende 3 uur",
|
"description": "Gemiddelde prijs voor de volgende 3 uur",
|
||||||
"long_description": "Toont de gemiddelde prijs voor de volgende 12 intervallen (3 uur) vanaf het volgende 15-minuten interval.",
|
"long_description": "Toont de gemiddelde prijs voor de volgende 12 intervallen (3 uur) vanaf het volgende 15-minuten interval.",
|
||||||
"usage_tips": "Absolute prijsdrempel: Voor EU Eco-programma's (vaatwassers, 3-4u looptijd). Start alleen wanneer 3u gemiddelde onder je prijslimiet is. Gebruik met trendsensor om beste moment binnen acceptabel prijsbereik te vinden."
|
"usage_tips": "Absolute prijsdrempel: Voor EU Eco-programma's (vaatwassers, 3-4u looptijd). Start alleen wanneer 3u gemiddelde onder uw prijslimiet is. Gebruik met trendsensor om beste moment binnen acceptabel prijsbereik te vinden."
|
||||||
},
|
},
|
||||||
"next_avg_4h": {
|
"next_avg_4h": {
|
||||||
"description": "Gemiddelde prijs voor de volgende 4 uur",
|
"description": "Gemiddelde prijs voor de volgende 4 uur",
|
||||||
|
|
@ -215,32 +202,32 @@
|
||||||
"next_avg_6h": {
|
"next_avg_6h": {
|
||||||
"description": "Gemiddelde prijs voor de volgende 6 uur",
|
"description": "Gemiddelde prijs voor de volgende 6 uur",
|
||||||
"long_description": "Toont de gemiddelde prijs voor de volgende 24 intervallen (6 uur) vanaf het volgende 15-minuten interval.",
|
"long_description": "Toont de gemiddelde prijs voor de volgende 24 intervallen (6 uur) vanaf het volgende 15-minuten interval.",
|
||||||
"usage_tips": "Absolute prijsdrempel: Avondplanning met prijslimieten. Plan taken alleen als 6u gemiddelde onder je maximaal acceptabele kosten blijft."
|
"usage_tips": "Absolute prijsdrempel: Avondplanning met prijslimieten. Plan taken alleen als 6u gemiddelde onder uw maximaal acceptabele kosten blijft."
|
||||||
},
|
},
|
||||||
"next_avg_8h": {
|
"next_avg_8h": {
|
||||||
"description": "Gemiddelde prijs voor de volgende 8 uur",
|
"description": "Gemiddelde prijs voor de volgende 8 uur",
|
||||||
"long_description": "Toont de gemiddelde prijs voor de volgende 32 intervallen (8 uur) vanaf het volgende 15-minuten interval.",
|
"long_description": "Toont de gemiddelde prijs voor de volgende 32 intervallen (8 uur) vanaf het volgende 15-minuten interval.",
|
||||||
"usage_tips": "Absolute prijsdrempel: Nachtelijke bedieningsbeslissingen. Stel harde prijslimieten in voor nachtelijke belastingen (batterij opladen, thermische opslag). Overschrijd nooit je budget."
|
"usage_tips": "Absolute prijsdrempel: Nachtelijke bedieningsbeslissingen. Stel harde prijslimieten in voor nachtelijke belastingen (batterij opladen, thermische opslag). Overschrijd nooit uw budget."
|
||||||
},
|
},
|
||||||
"next_avg_12h": {
|
"next_avg_12h": {
|
||||||
"description": "Gemiddelde prijs voor de volgende 12 uur",
|
"description": "Gemiddelde prijs voor de volgende 12 uur",
|
||||||
"long_description": "Toont de gemiddelde prijs voor de volgende 48 intervallen (12 uur) vanaf het volgende 15-minuten interval.",
|
"long_description": "Toont de gemiddelde prijs voor de volgende 48 intervallen (12 uur) vanaf het volgende 15-minuten interval.",
|
||||||
"usage_tips": "Absolute prijsdrempel: Strategische beslissingen met prijslimieten. Ga alleen door als 12u gemiddelde onder je maximaal acceptabele prijs is. Goed voor uitgestelde grote belastingen."
|
"usage_tips": "Absolute prijsdrempel: Strategische beslissingen met prijslimieten. Ga alleen door als 12u gemiddelde onder uw maximaal acceptabele prijs is. Goed voor uitgestelde grote belastingen."
|
||||||
},
|
},
|
||||||
"price_trend_1h": {
|
"price_trend_1h": {
|
||||||
"description": "Prijstrend voor het volgende uur",
|
"description": "Prijstrend voor het volgende uur",
|
||||||
"long_description": "Vergelijkt huidige intervalprijs met gemiddelde van volgend 1 uur (4 intervallen). Stijgend als toekomst >5% hoger is, dalend als >5% lager, anders stabiel.",
|
"long_description": "Vergelijkt huidige intervalprijs met gemiddelde van volgend 1 uur (4 intervallen). Stijgend als toekomst >5% hoger is, dalend als >5% lager, anders stabiel.",
|
||||||
"usage_tips": "Relatieve optimalisatie: 'dalend' = wacht, prijzen dalen. 'stijgend' = handel nu of je betaalt meer. 'stabiel' = prijs maakt nu niet veel uit. Werkt onafhankelijk van absoluut prijsniveau."
|
"usage_tips": "Relatieve optimalisatie: 'dalend' = wacht, prijzen dalen. 'stijgend' = handel nu of u betaalt meer. 'stabiel' = prijs maakt nu niet veel uit. Werkt onafhankelijk van absoluut prijsniveau."
|
||||||
},
|
},
|
||||||
"price_trend_2h": {
|
"price_trend_2h": {
|
||||||
"description": "Prijstrend voor de volgende 2 uur",
|
"description": "Prijstrend voor de volgende 2 uur",
|
||||||
"long_description": "Vergelijkt huidige intervalprijs met gemiddelde van volgende 2 uur (8 intervallen). Stijgend als toekomst >5% hoger is, dalend als >5% lager, anders stabiel.",
|
"long_description": "Vergelijkt huidige intervalprijs met gemiddelde van volgende 2 uur (8 intervallen). Stijgend als toekomst >5% hoger is, dalend als >5% lager, anders stabiel.",
|
||||||
"usage_tips": "Relatieve optimalisatie: Ideaal voor apparaten. 'dalend' betekent betere prijzen komen over 2u - stel uit indien mogelijk. Vindt beste timing binnen je beschikbare venster, ongeacht seizoen."
|
"usage_tips": "Relatieve optimalisatie: Ideaal voor apparaten. 'dalend' betekent betere prijzen komen over 2u - stel uit indien mogelijk. Vindt beste timing binnen uw beschikbare venster, ongeacht seizoen."
|
||||||
},
|
},
|
||||||
"price_trend_3h": {
|
"price_trend_3h": {
|
||||||
"description": "Prijstrend voor de volgende 3 uur",
|
"description": "Prijstrend voor de volgende 3 uur",
|
||||||
"long_description": "Vergelijkt huidige intervalprijs met gemiddelde van volgende 3 uur (12 intervallen). Stijgend als toekomst >5% hoger is, dalend als >5% lager, anders stabiel.",
|
"long_description": "Vergelijkt huidige intervalprijs met gemiddelde van volgende 3 uur (12 intervallen). Stijgend als toekomst >5% hoger is, dalend als >5% lager, anders stabiel.",
|
||||||
"usage_tips": "Relatieve optimalisatie: Voor Eco-programma's. 'dalend' betekent prijzen dalen >5% - het wachten waard. Werkt in elk seizoen. Combineer met avg-sensor voor prijslimiet: alleen wanneer avg < je limiet EN trend niet 'dalend'."
|
"usage_tips": "Relatieve optimalisatie: Voor Eco-programma's. 'dalend' betekent prijzen dalen >5% - het wachten waard. Werkt in elk seizoen. Combineer met avg-sensor voor prijslimiet: alleen wanneer avg < uw limiet EN trend niet 'dalend'."
|
||||||
},
|
},
|
||||||
"price_trend_4h": {
|
"price_trend_4h": {
|
||||||
"description": "Prijstrend voor de volgende 4 uur",
|
"description": "Prijstrend voor de volgende 4 uur",
|
||||||
|
|
@ -250,12 +237,12 @@
|
||||||
"price_trend_5h": {
|
"price_trend_5h": {
|
||||||
"description": "Prijstrend voor de volgende 5 uur",
|
"description": "Prijstrend voor de volgende 5 uur",
|
||||||
"long_description": "Vergelijkt huidige intervalprijs met gemiddelde van volgende 5 uur (20 intervallen). Stijgend als toekomst >5% hoger is, dalend als >5% lager, anders stabiel.",
|
"long_description": "Vergelijkt huidige intervalprijs met gemiddelde van volgende 5 uur (20 intervallen). Stijgend als toekomst >5% hoger is, dalend als >5% lager, anders stabiel.",
|
||||||
"usage_tips": "Relatieve optimalisatie: Uitgebreide operaties. Past zich aan de markt aan - vindt beste relatieve timing in elke prijsomgeving. 'stabiel/stijgend' = goed moment om te starten binnen je planningsvenster."
|
"usage_tips": "Relatieve optimalisatie: Uitgebreide operaties. Past zich aan de markt aan - vindt beste relatieve timing in elke prijsomgeving. 'stabiel/stijgend' = goed moment om te starten binnen uw planningsvenster."
|
||||||
},
|
},
|
||||||
"price_trend_6h": {
|
"price_trend_6h": {
|
||||||
"description": "Prijstrend voor de volgende 6 uur",
|
"description": "Prijstrend voor de volgende 6 uur",
|
||||||
"long_description": "Vergelijkt huidige intervalprijs met gemiddelde van volgende 6 uur (24 intervallen). Stijgend als toekomst >5% hoger is, dalend als >5% lager, anders stabiel.",
|
"long_description": "Vergelijkt huidige intervalprijs met gemiddelde van volgende 6 uur (24 intervallen). Stijgend als toekomst >5% hoger is, dalend als >5% lager, anders stabiel.",
|
||||||
"usage_tips": "Relatieve optimalisatie: Avondbeslissingen. 'dalend' = prijzen verbeteren aanzienlijk als je wacht. Geen vaste drempels nodig - past automatisch aan winter/zomer prijsniveaus."
|
"usage_tips": "Relatieve optimalisatie: Avondbeslissingen. 'dalend' = prijzen verbeteren aanzienlijk als u wacht. Geen vaste drempels nodig - past automatisch aan winter/zomer prijsniveaus."
|
||||||
},
|
},
|
||||||
"price_trend_8h": {
|
"price_trend_8h": {
|
||||||
"description": "Prijstrend voor de volgende 8 uur",
|
"description": "Prijstrend voor de volgende 8 uur",
|
||||||
|
|
@ -289,27 +276,27 @@
|
||||||
},
|
},
|
||||||
"data_timestamp": {
|
"data_timestamp": {
|
||||||
"description": "Tijdstempel van het laatst beschikbare prijsgegevensinterval",
|
"description": "Tijdstempel van het laatst beschikbare prijsgegevensinterval",
|
||||||
"long_description": "Toont het tijdstempel van het laatst beschikbare prijsgegevensinterval van je Tibber-abonnement"
|
"long_description": "Toont het tijdstempel van het laatst beschikbare prijsgegevensinterval van uw Tibber-abonnement"
|
||||||
},
|
},
|
||||||
"today_volatility": {
|
"today_volatility": {
|
||||||
"description": "Hoeveel de stroomprijzen vandaag schommelen",
|
"description": "Prijsvolatiliteitsclassificatie voor vandaag",
|
||||||
"long_description": "Geeft aan of de prijzen vandaag stabiel blijven of grote schommelingen hebben. Lage volatiliteit betekent vrij constante prijzen – timing maakt weinig uit. Hoge volatiliteit betekent duidelijke prijsverschillen gedurende de dag – goede kans om verbruik naar goedkopere periodes te verschuiven. `price_coefficient_variation_%` toont het percentage, `price_spread` de absolute prijsspanne.",
|
"long_description": "Toont hoeveel elektriciteitsprijzen variëren gedurende vandaag op basis van de spreiding (verschil tussen hoogste en laagste prijs). Classificatie: LOW = spreiding < 5ct, MODERATE = 5-15ct, HIGH = 15-30ct, VERY HIGH = >30ct.",
|
||||||
"usage_tips": "Gebruik dit om te beslissen of optimaliseren de moeite waard is. Bij lage volatiliteit kun je apparaten op elk moment laten draaien. Bij hoge volatiliteit bespaar je merkbaar door Best Price-periodes te volgen."
|
"usage_tips": "Gebruik dit om te bepalen of prijsgebaseerde optimalisatie de moeite waard is. Bijvoorbeeld, met een balkonbatterij met 15% efficiëntieverlies is optimalisatie alleen zinvol wanneer volatiliteit ten minste MODERATE is. Maak automatiseringen die volatiliteit controleren voordat u laad-/ontlaadcycli plant."
|
||||||
},
|
},
|
||||||
"tomorrow_volatility": {
|
"tomorrow_volatility": {
|
||||||
"description": "Hoeveel de stroomprijzen morgen zullen schommelen",
|
"description": "Prijsvolatiliteitsclassificatie voor morgen",
|
||||||
"long_description": "Geeft aan of de prijzen morgen stabiel blijven of grote schommelingen hebben. Beschikbaar zodra de gegevens voor morgen zijn gepubliceerd (meestal 13:00–14:00 CET). Lage volatiliteit betekent vrij constante prijzen – timing is niet kritisch. Hoge volatiliteit betekent duidelijke prijsverschillen gedurende de dag – goede kans om energie-intensieve taken te plannen. `price_coefficient_variation_%` toont het percentage, `price_spread` de absolute prijsspanne.",
|
"long_description": "Toont hoeveel elektriciteitsprijzen zullen variëren gedurende morgen op basis van de spreiding (verschil tussen hoogste en laagste prijs). Is onbeschikbaar totdat de gegevens van morgen zijn gepubliceerd (meestal 13:00-14:00 CET).",
|
||||||
"usage_tips": "Gebruik dit om het verbruik van morgen te plannen. Hoge volatiliteit? Plan flexibele lasten in Best Price-periodes. Lage volatiliteit? Laat apparaten draaien wanneer het jou uitkomt."
|
"usage_tips": "Gebruik dit voor vooruitplanning van het energieverbruik van morgen. Als morgen HIGH of VERY HIGH volatiliteit heeft, is het de moeite waard om de timing van energieverbruik te optimaliseren. Bij LOW kunt u apparaten op elk moment gebruiken zonder significante kostenverschillen."
|
||||||
},
|
},
|
||||||
"next_24h_volatility": {
|
"next_24h_volatility": {
|
||||||
"description": "Hoeveel de prijzen de komende 24 uur zullen schommelen",
|
"description": "Prijsvolatiliteitsclassificatie voor de rollende volgende 24 uur",
|
||||||
"long_description": "Geeft de prijsvolatiliteit aan voor een rollend 24-uursvenster vanaf nu (wordt elke 15 minuten bijgewerkt). Lage volatiliteit betekent vrij constante prijzen. Hoge volatiliteit betekent merkbare prijsschommelingen en dus optimalisatiemogelijkheden. In tegenstelling tot vandaag/morgen-sensoren overschrijdt deze daggrenzen en geeft een doorlopende vooruitblik. `price_coefficient_variation_%` toont het percentage, `price_spread` de absolute prijsspanne.",
|
"long_description": "Toont hoeveel elektriciteitsprijzen variëren in de volgende 24 uur vanaf nu (rollend venster). Dit overschrijdt daggrenzen en wordt elke 15 minuten bijgewerkt, wat een vooruitkijkende volatiliteitsbeoordeling biedt onafhankelijk van kalenderdagen.",
|
||||||
"usage_tips": "Het beste voor beslissingen in real-time. Gebruik bij het plannen van batterijladen of andere flexibele lasten die over middernacht kunnen lopen. Biedt een consistent 24-uurs beeld, los van de kalenderdag."
|
"usage_tips": "Beste sensor voor realtime optimalisatiebeslissingen. In tegenstelling tot vandaag/morgen-sensoren die om middernacht wisselen, biedt deze een continue 24-uurs volatiliteitsbeoordeling. Gebruik voor batterijlaadstrategieën die over daggrenzen heen gaan."
|
||||||
},
|
},
|
||||||
"today_tomorrow_volatility": {
|
"today_tomorrow_volatility": {
|
||||||
"description": "Gecombineerde prijsvolatiliteit voor vandaag en morgen",
|
"description": "Gecombineerde prijsvolatiliteitsclassificatie voor vandaag en morgen",
|
||||||
"long_description": "Geeft de totale volatiliteit weer wanneer vandaag en morgen samen worden bekeken (zodra morgengegevens beschikbaar zijn). Toont of er duidelijke prijsverschillen over de daggrens heen zijn. Valt terug naar alleen vandaag als morgengegevens ontbreken. Handig voor meerdaagse optimalisatie. `price_coefficient_variation_%` toont het percentage, `price_spread` de absolute prijsspanne.",
|
"long_description": "Toont volatiliteit over zowel vandaag als morgen gecombineerd (wanneer de gegevens van morgen beschikbaar zijn). Biedt een uitgebreid overzicht van prijsvariatie over maximaal 48 uur. Valt terug op alleen vandaag wanneer de gegevens van morgen nog niet beschikbaar zijn.",
|
||||||
"usage_tips": "Gebruik voor taken die meerdere dagen beslaan. Kijk of de prijsverschillen groot genoeg zijn om plannen op te baseren. De afzonderlijke dag-sensoren tonen per-dag bijdragen als je meer detail wilt."
|
"usage_tips": "Gebruik dit voor meerdaagse planning en om te begrijpen of prijskansen bestaan over de daggrenzen heen. De attributen 'today_volatility' en 'tomorrow_volatility' tonen individuele dagbijdragen. Handig voor het plannen van laadsessies die middernacht kunnen overschrijden."
|
||||||
},
|
},
|
||||||
"data_lifecycle_status": {
|
"data_lifecycle_status": {
|
||||||
"description": "Huidige status van prijsgegevenslevenscyclus en caching",
|
"description": "Huidige status van prijsgegevenslevenscyclus en caching",
|
||||||
|
|
@ -317,49 +304,39 @@
|
||||||
"usage_tips": "Gebruik deze diagnostische sensor om gegevensfrisheid en API-aanroeppatronen te begrijpen. Controleer het 'cache_age'-attribuut om te zien hoe oud de huidige gegevens zijn. Monitor 'next_api_poll' om te weten wanneer de volgende update is gepland. Gebruik 'data_completeness' om te zien of gisteren/vandaag/morgen gegevens beschikbaar zijn. De 'api_calls_today'-teller helpt API-gebruik bij te houden. Perfect voor probleemoplossing of begrip van integratiegedrag."
|
"usage_tips": "Gebruik deze diagnostische sensor om gegevensfrisheid en API-aanroeppatronen te begrijpen. Controleer het 'cache_age'-attribuut om te zien hoe oud de huidige gegevens zijn. Monitor 'next_api_poll' om te weten wanneer de volgende update is gepland. Gebruik 'data_completeness' om te zien of gisteren/vandaag/morgen gegevens beschikbaar zijn. De 'api_calls_today'-teller helpt API-gebruik bij te houden. Perfect voor probleemoplossing of begrip van integratiegedrag."
|
||||||
},
|
},
|
||||||
"best_price_end_time": {
|
"best_price_end_time": {
|
||||||
"description": "Totale lengte van huidige of volgende voordelige periode (state in uren, attribuut in minuten)",
|
"description": "Wanneer de huidige of volgende goedkope periode eindigt",
|
||||||
"long_description": "Toont hoe lang de voordelige periode duurt. State gebruikt uren (float) voor een leesbare UI; attribuut `period_duration_minutes` behoudt afgeronde minuten voor automatiseringen. Actief → duur van de huidige periode, anders de volgende.",
|
"long_description": "Toont het eindtijdstempel van de huidige goedkope periode wanneer actief, of het einde van de volgende periode wanneer geen periode actief is. Toont altijd een nuttige tijdreferentie voor planning. Geeft alleen 'Onbekend' terug wanneer geen periodes zijn geconfigureerd.",
|
||||||
"usage_tips": "UI kan 1,5 u tonen terwijl `period_duration_minutes` = 90 voor automatiseringen blijft."
|
"usage_tips": "Gebruik dit om een aftelling weer te geven zoals 'Goedkope periode eindigt over 2 uur' (wanneer actief) of 'Volgende goedkope periode eindigt om 14:00' (wanneer inactief). Home Assistant toont automatisch relatieve tijd voor tijdstempelsensoren."
|
||||||
},
|
|
||||||
"best_price_period_duration": {
|
|
||||||
"description": "Lengte van huidige/volgende goedkope periode",
|
|
||||||
"long_description": "Totale duur van huidige of volgende goedkope periode. De state wordt weergegeven in uren (bijv. 1,5 u) voor gemakkelijk aflezen in de UI, terwijl het attribuut `period_duration_minutes` dezelfde waarde in minuten levert (bijv. 90) voor automatiseringen. Deze waarde vertegenwoordigt de **volledige geplande duur** van de periode en is constant gedurende de gehele periode, zelfs als de resterende tijd (remaining_minutes) afneemt.",
|
|
||||||
"usage_tips": "Combineer met remaining_minutes om te berekenen wanneer langlopende apparaten moeten worden gestopt: Periode is `period_duration_minutes - remaining_minutes` minuten geleden gestart. Dit attribuut ondersteunt energie-optimalisatiestrategieën door te helpen bij het plannen van hoog-verbruiksactiviteiten binnen goedkope periodes."
|
|
||||||
},
|
},
|
||||||
"best_price_remaining_minutes": {
|
"best_price_remaining_minutes": {
|
||||||
"description": "Resterende tijd in huidige goedkope periode",
|
"description": "Resterende minuten in huidige goedkope periode (0 wanneer inactief)",
|
||||||
"long_description": "Toont hoeveel tijd er nog overblijft in de huidige goedkope periode. De state wordt weergegeven in uren (bijv. 0,75 u) voor gemakkelijk aflezen in dashboards, terwijl het attribuut `remaining_minutes` dezelfde tijd in minuten levert (bijv. 45) voor automatiseringsvoorwaarden. **Afteltimer**: Deze waarde neemt elke minuut af tijdens een actieve periode. Geeft 0 terug wanneer geen goedkope periode actief is. Werkt elke minuut bij.",
|
"long_description": "Toont hoeveel minuten er nog over zijn in de huidige goedkope periode. Geeft 0 terug wanneer geen periode actief is. Werkt elke minuut bij. Controleer binary_sensor.best_price_period om te zien of een periode momenteel actief is.",
|
||||||
"usage_tips": "Voor automatiseringen: Gebruik attribuut `remaining_minutes` zoals 'Als remaining_minutes > 60, start vaatwasser nu (genoeg tijd om te voltooien)' of 'Als remaining_minutes < 15, rond huidige cyclus binnenkort af'. UI toont gebruiksvriendelijke uren (bijv. 1,25 u). Waarde 0 geeft aan dat geen goedkope periode actief is."
|
"usage_tips": "Perfect voor automatiseringen: 'Als remaining_minutes > 0 EN remaining_minutes < 30, start wasmachine nu'. De waarde 0 maakt het gemakkelijk om te controleren of een periode actief is (waarde > 0) of niet (waarde = 0)."
|
||||||
},
|
},
|
||||||
"best_price_progress": {
|
"best_price_progress": {
|
||||||
"description": "Voortgang door huidige goedkope periode (0% wanneer inactief)",
|
"description": "Voortgang door huidige goedkope periode (0% wanneer inactief)",
|
||||||
"long_description": "Toont voortgang door de huidige goedkope periode als 0-100%. Geeft 0% terug wanneer geen periode actief is. Werkt elke minuut bij. 0% betekent periode net gestart, 100% betekent dat deze bijna eindigt.",
|
"long_description": "Toont de voortgang door de huidige goedkope periode als 0-100%. Geeft 0% terug wanneer geen periode actief is. Werkt elke minuut bij. 0% betekent periode net gestart, 100% betekent het eindigt bijna.",
|
||||||
"usage_tips": "Geweldig voor visuele voortgangsbalken. Gebruik in automatiseringen: 'Als progress > 0 EN progress > 75, stuur melding dat goedkope periode bijna eindigt'. Waarde 0 geeft aan dat geen periode actief is."
|
"usage_tips": "Geweldig voor visuele voortgangsbalken. Gebruik in automatiseringen: 'Als progress > 0 EN progress > 75, stuur melding dat goedkope periode bijna eindigt'. Waarde 0 geeft aan dat er geen actieve periode is."
|
||||||
},
|
},
|
||||||
"best_price_next_start_time": {
|
"best_price_next_start_time": {
|
||||||
"description": "Totale lengte van huidige of volgende dure periode (state in uren, attribuut in minuten)",
|
"description": "Wanneer de volgende goedkope periode begint",
|
||||||
"long_description": "Toont hoe lang de dure periode duurt. State gebruikt uren (float) voor de UI; attribuut `period_duration_minutes` behoudt afgeronde minuten voor automatiseringen. Actief → duur van de huidige periode, anders de volgende.",
|
"long_description": "Toont wanneer de volgende komende goedkope periode begint. Tijdens een actieve periode toont dit de start van de VOLGENDE periode na de huidige. Geeft alleen 'Onbekend' terug wanneer geen toekomstige periodes zijn geconfigureerd.",
|
||||||
"usage_tips": "UI kan 0,75 u tonen terwijl `period_duration_minutes` = 45 voor automatiseringen blijft."
|
"usage_tips": "Altijd nuttig voor vooruitplanning: 'Volgende goedkope periode begint over 3 uur' (of je nu in een periode zit of niet). Combineer met automatiseringen: 'Wanneer volgende starttijd over 10 minuten is, stuur melding om wasmachine voor te bereiden'."
|
||||||
},
|
},
|
||||||
"best_price_next_in_minutes": {
|
"best_price_next_in_minutes": {
|
||||||
"description": "Resterende tijd in huidige dure periode (state in uren, attribuut in minuten)",
|
"description": "Minuten tot volgende goedkope periode begint (0 bij overgang)",
|
||||||
"long_description": "Toont hoeveel tijd er nog over is. State gebruikt uren (float); attribuut `remaining_minutes` behoudt afgeronde minuten voor automatiseringen. Geeft 0 terug wanneer er geen periode actief is. Werkt elke minuut bij.",
|
"long_description": "Toont minuten tot de volgende goedkope periode begint. Tijdens een actieve periode toont dit de tijd tot de periode NA de huidige. Geeft 0 terug tijdens korte overgangsmomenten. Werkt elke minuut bij.",
|
||||||
"usage_tips": "Gebruik `remaining_minutes` voor drempels (bijv. > 60) terwijl de state in uren goed leesbaar blijft."
|
"usage_tips": "Perfect voor 'wacht tot goedkope periode' automatiseringen: 'Als next_in_minutes > 0 EN next_in_minutes < 15, wacht voordat vaatwasser wordt gestart'. Waarde > 0 geeft altijd aan dat een toekomstige periode is gepland."
|
||||||
},
|
},
|
||||||
"peak_price_end_time": {
|
"peak_price_end_time": {
|
||||||
"description": "Tijd tot volgende dure periode (state in uren, attribuut in minuten)",
|
"description": "Wanneer de huidige of volgende dure periode eindigt",
|
||||||
"long_description": "Toont hoe lang het duurt tot de volgende dure periode start. State gebruikt uren (float); attribuut `next_in_minutes` behoudt afgeronde minuten voor automatiseringen. Tijdens een actieve periode is dit de tijd tot de periode na de huidige. 0 tijdens korte overgangen. Werkt elke minuut bij.",
|
"long_description": "Toont het eindtijdstempel van de huidige dure periode wanneer actief, of het einde van de volgende periode wanneer geen periode actief is. Toont altijd een nuttige tijdreferentie voor planning. Geeft alleen 'Onbekend' terug wanneer geen periodes zijn geconfigureerd.",
|
||||||
"usage_tips": "Gebruik `next_in_minutes` in automatiseringen (bijv. < 10) terwijl de state in uren leesbaar blijft."
|
"usage_tips": "Gebruik dit om 'Dure periode eindigt over 1 uur' weer te geven (wanneer actief) of 'Volgende dure periode eindigt om 18:00' (wanneer inactief). Combineer met automatiseringen om activiteiten te hervatten na piek."
|
||||||
},
|
|
||||||
"peak_price_period_duration": {
|
|
||||||
"description": "Totale duur van huidige of volgende dure periode in minuten",
|
|
||||||
"long_description": "Toont de totale duur van de dure periode in minuten. Tijdens een actieve periode toont dit de volledige lengte van de huidige periode. Wanneer geen periode actief is, toont dit de duur van de volgende komende periode. Voorbeeld: '60 minuten' voor een 1-uur periode.",
|
|
||||||
"usage_tips": "Gebruik om energiebesparende maatregelen te plannen: 'Als duration > 120, verlaag verwarmingstemperatuur agressiever (lange dure periode)'. Helpt bij het inschatten hoeveel energieverbruik moet worden verminderd."
|
|
||||||
},
|
},
|
||||||
"peak_price_remaining_minutes": {
|
"peak_price_remaining_minutes": {
|
||||||
"description": "Resterende tijd in huidige dure periode",
|
"description": "Resterende minuten in huidige dure periode (0 wanneer inactief)",
|
||||||
"long_description": "Toont hoeveel tijd er nog overblijft in de huidige dure periode. De state wordt weergegeven in uren (bijv. 0,75 u) voor gemakkelijk aflezen in dashboards, terwijl het attribuut `remaining_minutes` dezelfde tijd in minuten levert (bijv. 45) voor automatiseringsvoorwaarden. **Afteltimer**: Deze waarde neemt elke minuut af tijdens een actieve periode. Geeft 0 terug wanneer geen dure periode actief is. Werkt elke minuut bij.",
|
"long_description": "Toont hoeveel minuten er nog over zijn in de huidige dure periode. Geeft 0 terug wanneer geen periode actief is. Werkt elke minuut bij. Controleer binary_sensor.peak_price_period om te zien of een periode momenteel actief is.",
|
||||||
"usage_tips": "Voor automatiseringen: Gebruik attribuut `remaining_minutes` zoals 'Als remaining_minutes > 60, annuleer uitgestelde laadronde' of 'Als remaining_minutes < 15, hervat normaal gebruik binnenkort'. UI toont gebruiksvriendelijke uren (bijv. 1,0 u). Waarde 0 geeft aan dat geen dure periode actief is."
|
"usage_tips": "Gebruik in automatiseringen: 'Als remaining_minutes > 60, annuleer uitgestelde laadronde'. Waarde 0 maakt het gemakkelijk om onderscheid te maken tussen actieve (waarde > 0) en inactieve (waarde = 0) periodes."
|
||||||
},
|
},
|
||||||
"peak_price_progress": {
|
"peak_price_progress": {
|
||||||
"description": "Voortgang door huidige dure periode (0% wanneer inactief)",
|
"description": "Voortgang door huidige dure periode (0% wanneer inactief)",
|
||||||
|
|
@ -372,9 +349,19 @@
|
||||||
"usage_tips": "Altijd nuttig voor planning: 'Volgende dure periode begint over 2 uur'. Automatisering: 'Wanneer volgende starttijd over 30 minuten is, verlaag verwarmingstemperatuur preventief'."
|
"usage_tips": "Altijd nuttig voor planning: 'Volgende dure periode begint over 2 uur'. Automatisering: 'Wanneer volgende starttijd over 30 minuten is, verlaag verwarmingstemperatuur preventief'."
|
||||||
},
|
},
|
||||||
"peak_price_next_in_minutes": {
|
"peak_price_next_in_minutes": {
|
||||||
"description": "Tijd tot volgende dure periode",
|
"description": "Minuten tot volgende dure periode begint (0 bij overgang)",
|
||||||
"long_description": "Toont hoe lang het duurt tot de volgende dure periode. De state wordt weergegeven in uren (bijv. 0,5 u) voor dashboards, terwijl het attribuut `next_in_minutes` minuten levert (bijv. 30) voor automatiseringsvoorwaarden. Tijdens een actieve periode toont dit de tijd tot de periode NA de huidige. Geeft 0 terug tijdens korte overgangsmomenten. Werkt elke minuut bij.",
|
"long_description": "Toont minuten tot de volgende dure periode begint. Tijdens een actieve periode toont dit de tijd tot de periode NA de huidige. Geeft 0 terug tijdens korte overgangsmomenten. Werkt elke minuut bij.",
|
||||||
"usage_tips": "Voor automatiseringen: Gebruik attribuut `next_in_minutes` zoals 'Als next_in_minutes > 0 EN next_in_minutes < 10, voltooi huidige laadcyclus nu voordat prijzen stijgen'. Waarde > 0 geeft altijd aan dat een toekomstige dure periode is gepland."
|
"usage_tips": "Preventieve automatisering: 'Als next_in_minutes > 0 EN next_in_minutes < 10, voltooi huidige laadcyclus nu voordat prijzen stijgen'."
|
||||||
|
},
|
||||||
|
"best_price_period_duration": {
|
||||||
|
"description": "Totale duur van huidige of volgende goedkope periode in minuten",
|
||||||
|
"long_description": "Toont de totale duur van de goedkope periode in minuten. Tijdens een actieve periode toont dit de volledige lengte van de huidige periode. Wanneer geen periode actief is, toont dit de duur van de volgende komende periode. Voorbeeld: '90 minuten' voor een 1,5-uur periode.",
|
||||||
|
"usage_tips": "Combineer met remaining_minutes voor taakplanning: 'Als duration = 120 EN remaining_minutes > 90, start wasmachine (genoeg tijd om te voltooien)'. Nuttig om te begrijpen of periodes lang genoeg zijn voor energie-intensieve taken."
|
||||||
|
},
|
||||||
|
"peak_price_period_duration": {
|
||||||
|
"description": "Totale duur van huidige of volgende dure periode in minuten",
|
||||||
|
"long_description": "Toont de totale duur van de dure periode in minuten. Tijdens een actieve periode toont dit de volledige lengte van de huidige periode. Wanneer geen periode actief is, toont dit de duur van de volgende komende periode. Voorbeeld: '60 minuten' voor een 1-uur periode.",
|
||||||
|
"usage_tips": "Gebruik om energiebesparende maatregelen te plannen: 'Als duration > 120, verlaag verwarmingstemperatuur agressiever (lange dure periode)'. Helpt bij het inschatten hoeveel energieverbruik moet worden verminderd."
|
||||||
},
|
},
|
||||||
"home_type": {
|
"home_type": {
|
||||||
"description": "Type woning (appartement, huis enz.)",
|
"description": "Type woning (appartement, huis enz.)",
|
||||||
|
|
@ -450,11 +437,6 @@
|
||||||
"description": "Data-export voor dashboard-integraties",
|
"description": "Data-export voor dashboard-integraties",
|
||||||
"long_description": "Deze sensor roept de get_chartdata-service aan met jouw geconfigureerde YAML-configuratie en stelt het resultaat beschikbaar als entiteitsattributen. De status toont 'ready' wanneer data beschikbaar is, 'error' bij fouten, of 'pending' voor de eerste aanroep. Perfect voor dashboard-integraties zoals ApexCharts die prijsgegevens uit entiteitsattributen moeten lezen.",
|
"long_description": "Deze sensor roept de get_chartdata-service aan met jouw geconfigureerde YAML-configuratie en stelt het resultaat beschikbaar als entiteitsattributen. De status toont 'ready' wanneer data beschikbaar is, 'error' bij fouten, of 'pending' voor de eerste aanroep. Perfect voor dashboard-integraties zoals ApexCharts die prijsgegevens uit entiteitsattributen moeten lezen.",
|
||||||
"usage_tips": "Configureer de YAML-parameters in de integratie-opties om overeen te komen met jouw get_chartdata-service-aanroep. De sensor wordt automatisch bijgewerkt wanneer prijsgegevens worden bijgewerkt (typisch na middernacht en wanneer gegevens van morgen binnenkomen). Krijg toegang tot de service-responsgegevens direct vanuit de entiteitsattributen - de structuur komt exact overeen met wat get_chartdata retourneert."
|
"usage_tips": "Configureer de YAML-parameters in de integratie-opties om overeen te komen met jouw get_chartdata-service-aanroep. De sensor wordt automatisch bijgewerkt wanneer prijsgegevens worden bijgewerkt (typisch na middernacht en wanneer gegevens van morgen binnenkomen). Krijg toegang tot de service-responsgegevens direct vanuit de entiteitsattributen - de structuur komt exact overeen met wat get_chartdata retourneert."
|
||||||
},
|
|
||||||
"chart_metadata": {
|
|
||||||
"description": "Lichtgewicht metadata voor diagramconfiguratie",
|
|
||||||
"long_description": "Biedt essentiële diagramconfiguratiewaarden als sensorattributen. Nuttig voor elke grafiekkaart die Y-as-grenzen nodig heeft. De sensor roept get_chartdata aan in alleen-metadata-modus (geen dataverwerking) en extraheert: yaxis_min, yaxis_max (gesuggereerd Y-asbereik voor optimale schaling). De status weerspiegelt het service-aanroepresultaat: 'ready' bij succes, 'error' bij fouten, 'pending' tijdens initialisatie.",
|
|
||||||
"usage_tips": "Configureer via configuration.yaml onder tibber_prices.chart_metadata_config (optioneel: day, subunit_currency, resolution). De sensor wordt automatisch bijgewerkt bij prijsgegevenswijzigingen. Krijg toegang tot metadata vanuit attributen: yaxis_min, yaxis_max. Gebruik met config-template-card of elk hulpmiddel dat entiteitsattributen leest - perfect voor dynamische diagramconfiguratie zonder handmatige berekeningen."
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"binary_sensor": {
|
"binary_sensor": {
|
||||||
|
|
@ -466,7 +448,7 @@
|
||||||
"peak_price_period": {
|
"peak_price_period": {
|
||||||
"description": "Of het huidige interval tot de duurste van de dag behoort",
|
"description": "Of het huidige interval tot de duurste van de dag behoort",
|
||||||
"long_description": "Wordt geactiveerd wanneer de huidige prijs in de top 20% van de prijzen van vandaag ligt",
|
"long_description": "Wordt geactiveerd wanneer de huidige prijs in de top 20% van de prijzen van vandaag ligt",
|
||||||
"usage_tips": "Gebruik dit om te voorkomen dat je apparaten met hoog verbruik draait tijdens dure intervallen"
|
"usage_tips": "Gebruik dit om te voorkomen dat u apparaten met hoog verbruik draait tijdens dure intervallen"
|
||||||
},
|
},
|
||||||
"best_price_period": {
|
"best_price_period": {
|
||||||
"description": "Of het huidige interval tot de goedkoopste van de dag behoort",
|
"description": "Of het huidige interval tot de goedkoopste van de dag behoort",
|
||||||
|
|
@ -487,80 +469,11 @@
|
||||||
"description": "Of realtime verbruiksmonitoring actief is",
|
"description": "Of realtime verbruiksmonitoring actief is",
|
||||||
"long_description": "Geeft aan of realtime elektriciteitsverbruikmonitoring is ingeschakeld en actief voor je Tibber-woning. Dit vereist compatibele meethardware (bijv. Tibber Pulse) en een actief abonnement.",
|
"long_description": "Geeft aan of realtime elektriciteitsverbruikmonitoring is ingeschakeld en actief voor je Tibber-woning. Dit vereist compatibele meethardware (bijv. Tibber Pulse) en een actief abonnement.",
|
||||||
"usage_tips": "Gebruik dit om te verifiëren dat realtimeverbruiksgegevens beschikbaar zijn. Schakel meldingen in als dit onverwacht verandert naar 'uit', wat wijst op mogelijke hardware- of verbindingsproblemen."
|
"usage_tips": "Gebruik dit om te verifiëren dat realtimeverbruiksgegevens beschikbaar zijn. Schakel meldingen in als dit onverwacht verandert naar 'uit', wat wijst op mogelijke hardware- of verbindingsproblemen."
|
||||||
}
|
|
||||||
},
|
|
||||||
"number": {
|
|
||||||
"best_price_flex_override": {
|
|
||||||
"description": "Maximaal percentage boven de dagelijkse minimumprijs dat intervallen kunnen hebben en nog steeds als 'beste prijs' kwalificeren. Aanbevolen: 15-20 met versoepeling ingeschakeld (standaard), of 25-35 zonder versoepeling. Maximum: 50 (harde limiet voor betrouwbare periodedetectie).",
|
|
||||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Flexibiliteit'-instelling uit de opties-dialoog voor beste prijs-periodeberekeningen.",
|
|
||||||
"usage_tips": "Schakel deze entiteit in om beste prijs-detectie dynamisch aan te passen via automatiseringen, bijv. hogere flexibiliteit voor kritieke lasten of strengere eisen voor flexibele apparaten."
|
|
||||||
},
|
},
|
||||||
"best_price_min_distance_override": {
|
"chart_data_export": {
|
||||||
"description": "Minimale procentuele afstand onder het daggemiddelde. Intervallen moeten zo ver onder het gemiddelde liggen om als 'beste prijs' te kwalificeren. Helpt echte lage prijsperioden te onderscheiden van gemiddelde prijzen.",
|
"description": "Gegevensexport voor dashboardintegraties",
|
||||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimale afstand'-instelling uit de opties-dialoog voor beste prijs-periodeberekeningen.",
|
"long_description": "Deze binaire sensor roept de get_chartdata-service aan om gegevens voor dashboard-widgets te exporteren. Ondersteunt ApexCharts en andere dashboardoplossingen die prijsgegevens willen visualiseren.",
|
||||||
"usage_tips": "Verhoog de waarde voor strengere beste prijs-criteria. Verlaag als te weinig perioden worden gedetecteerd."
|
"usage_tips": "Configureer de YAML-parameters in de integratieopties onder 'Geavanceerd'. Deze sensor biedt meestal geen praktische waarde in automatiseringen - hij dient hoofdzakelijk als servicecontainer voor dashboardgebruik. Raadpleeg de documentatie voor specifieke parameterformat."
|
||||||
},
|
|
||||||
"best_price_min_period_length_override": {
|
|
||||||
"description": "Minimale periodelengte in 15-minuten intervallen. Perioden korter dan dit worden niet gerapporteerd. Voorbeeld: 2 = minimaal 30 minuten.",
|
|
||||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimale periodelengte'-instelling uit de opties-dialoog voor beste prijs-periodeberekeningen.",
|
|
||||||
"usage_tips": "Pas aan op typische apparaatlooptijd: 2 (30 min) voor snelle programma's, 4-8 (1-2 uur) voor normale cycli, 8+ voor lange ECO-programma's."
|
|
||||||
},
|
|
||||||
"best_price_min_periods_override": {
|
|
||||||
"description": "Minimum aantal beste prijs-perioden om dagelijks te vinden. Wanneer versoepeling is ingeschakeld, past het systeem automatisch de criteria aan om dit aantal te bereiken.",
|
|
||||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimum periodes'-instelling uit de opties-dialoog voor beste prijs-periodeberekeningen.",
|
|
||||||
"usage_tips": "Stel dit in op het aantal tijdkritieke taken dat je dagelijks hebt. Voorbeeld: 2 voor twee wasladingen."
|
|
||||||
},
|
|
||||||
"best_price_relaxation_attempts_override": {
|
|
||||||
"description": "Aantal pogingen om de criteria geleidelijk te versoepelen om het minimum aantal perioden te bereiken. Elke poging verhoogt de flexibiliteit met 3 procent. Bij 0 worden alleen basiscriteria gebruikt.",
|
|
||||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Versoepeling pogingen'-instelling uit de opties-dialoog voor beste prijs-periodeberekeningen.",
|
|
||||||
"usage_tips": "Hogere waarden maken periodedetectie adaptiever voor dagen met stabiele prijzen. Stel in op 0 om strikte criteria af te dwingen zonder versoepeling."
|
|
||||||
},
|
|
||||||
"best_price_gap_count_override": {
|
|
||||||
"description": "Maximum aantal duurdere intervallen dat mag worden toegestaan tussen goedkope intervallen terwijl ze nog steeds als één aaneengesloten periode tellen. Bij 0 moeten goedkope intervallen opeenvolgend zijn.",
|
|
||||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Gap tolerantie'-instelling uit de opties-dialoog voor beste prijs-periodeberekeningen.",
|
|
||||||
"usage_tips": "Verhoog dit voor apparaten met variabele belasting (bijv. warmtepompen) die korte duurdere intervallen kunnen tolereren. Stel in op 0 voor continu goedkope perioden."
|
|
||||||
},
|
|
||||||
"peak_price_flex_override": {
|
|
||||||
"description": "Maximaal percentage onder de dagelijkse maximumprijs dat intervallen kunnen hebben en nog steeds als 'piekprijs' kwalificeren. Dezelfde aanbevelingen als voor beste prijs-flexibiliteit.",
|
|
||||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Flexibiliteit'-instelling uit de opties-dialoog voor piekprijs-periodeberekeningen.",
|
|
||||||
"usage_tips": "Gebruik dit om de piekprijs-drempel tijdens runtime aan te passen voor automatiseringen die verbruik tijdens dure uren vermijden."
|
|
||||||
},
|
|
||||||
"peak_price_min_distance_override": {
|
|
||||||
"description": "Minimale procentuele afstand boven het daggemiddelde. Intervallen moeten zo ver boven het gemiddelde liggen om als 'piekprijs' te kwalificeren.",
|
|
||||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimale afstand'-instelling uit de opties-dialoog voor piekprijs-periodeberekeningen.",
|
|
||||||
"usage_tips": "Verhoog de waarde om alleen extreme prijspieken te vangen. Verlaag om meer dure tijden mee te nemen."
|
|
||||||
},
|
|
||||||
"peak_price_min_period_length_override": {
|
|
||||||
"description": "Minimale periodelengte in 15-minuten intervallen voor piekprijzen. Kortere prijspieken worden niet als perioden gerapporteerd.",
|
|
||||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimale periodelengte'-instelling uit de opties-dialoog voor piekprijs-periodeberekeningen.",
|
|
||||||
"usage_tips": "Kortere waarden vangen korte prijspieken. Langere waarden focussen op aanhoudende dure perioden."
|
|
||||||
},
|
|
||||||
"peak_price_min_periods_override": {
|
|
||||||
"description": "Minimum aantal piekprijs-perioden om dagelijks te vinden.",
|
|
||||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimum periodes'-instelling uit de opties-dialoog voor piekprijs-periodeberekeningen.",
|
|
||||||
"usage_tips": "Stel dit in op basis van hoeveel dure perioden je per dag wilt vangen voor automatiseringen."
|
|
||||||
},
|
|
||||||
"peak_price_relaxation_attempts_override": {
|
|
||||||
"description": "Aantal pogingen om de criteria te versoepelen om het minimum aantal piekprijs-perioden te bereiken.",
|
|
||||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Versoepeling pogingen'-instelling uit de opties-dialoog voor piekprijs-periodeberekeningen.",
|
|
||||||
"usage_tips": "Verhoog dit als geen perioden worden gevonden op dagen met stabiele prijzen. Stel in op 0 om strikte criteria af te dwingen."
|
|
||||||
},
|
|
||||||
"peak_price_gap_count_override": {
|
|
||||||
"description": "Maximum aantal goedkopere intervallen dat mag worden toegestaan tussen dure intervallen terwijl ze nog steeds als één piekprijs-periode tellen.",
|
|
||||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Gap tolerantie'-instelling uit de opties-dialoog voor piekprijs-periodeberekeningen.",
|
|
||||||
"usage_tips": "Hogere waarden vangen langere dure perioden zelfs met korte prijsdips. Stel in op 0 voor strikt aaneengesloten piekprijzen."
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"switch": {
|
|
||||||
"best_price_enable_relaxation_override": {
|
|
||||||
"description": "Indien ingeschakeld, worden criteria automatisch versoepeld om het minimum aantal perioden te bereiken. Indien uitgeschakeld, worden alleen perioden gerapporteerd die aan strikte criteria voldoen (mogelijk nul perioden op dagen met stabiele prijzen).",
|
|
||||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimum aantal bereiken'-instelling uit de opties-dialoog voor beste prijs-periodeberekeningen.",
|
|
||||||
"usage_tips": "Schakel dit in voor gegarandeerde dagelijkse automatiseringsmogelijkheden. Schakel uit als je alleen echt goedkope perioden wilt, ook als dat betekent dat er op sommige dagen geen perioden zijn."
|
|
||||||
},
|
|
||||||
"peak_price_enable_relaxation_override": {
|
|
||||||
"description": "Indien ingeschakeld, worden criteria automatisch versoepeld om het minimum aantal perioden te bereiken. Indien uitgeschakeld, worden alleen echte prijspieken gerapporteerd.",
|
|
||||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimum aantal bereiken'-instelling uit de opties-dialoog voor piekprijs-periodeberekeningen.",
|
|
||||||
"usage_tips": "Schakel dit in voor consistente piekprijs-waarschuwingen. Schakel uit om alleen extreme prijspieken te vangen."
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"home_types": {
|
"home_types": {
|
||||||
|
|
|
||||||
|
|
@ -1,20 +1,7 @@
|
||||||
{
|
{
|
||||||
"apexcharts": {
|
"apexcharts": {
|
||||||
"title_rating_level": "Prisfaser dagsprogress",
|
"title_rating_level": "Prisfaser daglig framsteg",
|
||||||
"title_level": "Prisnivå",
|
"title_level": "Prisnivå"
|
||||||
"hourly_suffix": "(Ø per timme)",
|
|
||||||
"best_price_period_name": "Bästa prisperiod",
|
|
||||||
"peak_price_period_name": "Toppprisperiod",
|
|
||||||
"notification": {
|
|
||||||
"metadata_sensor_unavailable": {
|
|
||||||
"title": "Tibber Prices: ApexCharts YAML genererad med begränsad funktionalitet",
|
|
||||||
"message": "Du har precis genererat en ApexCharts-kortkonfiguration via Utvecklarverktyg. Diagram-metadata-sensorn är inaktiverad, så den genererade YAML:en visar bara **grundläggande funktionalitet** (auto-skalning, fast gradient vid 50%).\n\n**För full funktionalitet** (optimerad skalning, dynamiska gradientfärger):\n1. [Öppna Tibber Prices-integrationen](https://my.home-assistant.io/redirect/integration/?domain=tibber_prices)\n2. Aktivera 'Chart Metadata'-sensorn\n3. **Generera YAML:en igen** via Utvecklarverktyg\n4. **Ersätt den gamla YAML:en** i din instrumentpanel med den nya versionen\n\n⚠️ Det räcker inte att bara aktivera sensorn - du måste regenerera och ersätta YAML-koden!"
|
|
||||||
},
|
|
||||||
"missing_cards": {
|
|
||||||
"title": "Tibber Prices: ApexCharts YAML kan inte användas",
|
|
||||||
"message": "Du har precis genererat en ApexCharts-kortkonfiguration via Utvecklarverktyg, men den genererade YAML:en **kommer inte att fungera** eftersom nödvändiga anpassade kort saknas.\n\n**Saknade kort:**\n{cards}\n\n**För att använda den genererade YAML:en:**\n1. Klicka på länkarna ovan för att installera de saknade korten från HACS\n2. Starta om Home Assistant (ibland nödvändigt)\n3. **Generera YAML:en igen** via Utvecklarverktyg\n4. Lägg till YAML:en i din instrumentpanel\n\n⚠️ Den nuvarande YAML-koden fungerar inte förrän alla kort är installerade!"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
"sensor": {
|
"sensor": {
|
||||||
"current_interval_price": {
|
"current_interval_price": {
|
||||||
|
|
@ -22,7 +9,7 @@
|
||||||
"long_description": "Visar nuvarande pris per kWh från ditt Tibber-abonnemang",
|
"long_description": "Visar nuvarande pris per kWh från ditt Tibber-abonnemang",
|
||||||
"usage_tips": "Använd detta för att spåra priser eller skapa automationer som körs när el är billig"
|
"usage_tips": "Använd detta för att spåra priser eller skapa automationer som körs när el är billig"
|
||||||
},
|
},
|
||||||
"current_interval_price_base": {
|
"current_interval_price_major": {
|
||||||
"description": "Nuvarande elpris i huvudvaluta (EUR/kWh, NOK/kWh, osv.) för Energipanelen",
|
"description": "Nuvarande elpris i huvudvaluta (EUR/kWh, NOK/kWh, osv.) för Energipanelen",
|
||||||
"long_description": "Visar nuvarande pris per kWh i huvudvaluta-enheter (t.ex. EUR/kWh istället för ct/kWh, NOK/kWh istället för øre/kWh). Denna sensor är speciellt utformad för användning med Home Assistants Energipanel, som kräver priser i standardvalutaenheter.",
|
"long_description": "Visar nuvarande pris per kWh i huvudvaluta-enheter (t.ex. EUR/kWh istället för ct/kWh, NOK/kWh istället för øre/kWh). Denna sensor är speciellt utformad för användning med Home Assistants Energipanel, som kräver priser i standardvalutaenheter.",
|
||||||
"usage_tips": "Använd denna sensor när du konfigurerar Energipanelen under Inställningar → Instrumentpaneler → Energi. Välj denna sensor som 'Entitet med nuvarande pris' för att automatiskt beräkna dina energikostnader. Energipanelen multiplicerar din energiförbrukning (kWh) med detta pris för att visa totala kostnader."
|
"usage_tips": "Använd denna sensor när du konfigurerar Energipanelen under Inställningar → Instrumentpaneler → Energi. Välj denna sensor som 'Entitet med nuvarande pris' för att automatiskt beräkna dina energikostnader. Energipanelen multiplicerar din energiförbrukning (kWh) med detta pris för att visa totala kostnader."
|
||||||
|
|
@ -58,9 +45,9 @@
|
||||||
"usage_tips": "Använd detta för att undvika att köra apparater under topppristider"
|
"usage_tips": "Använd detta för att undvika att köra apparater under topppristider"
|
||||||
},
|
},
|
||||||
"average_price_today": {
|
"average_price_today": {
|
||||||
"description": "Typiskt elpris för idag per kWh (konfigurerbart visningsformat)",
|
"description": "Det genomsnittliga elpriset för idag per kWh",
|
||||||
"long_description": "Visar priset per kWh för nuvarande dag från ditt Tibber-abonnemang. **Som standard visar statusen medianen** (motståndskraftig mot extrema prispikar, visar typisk prisnåvå). Du kan ändra detta i integrationsinstllningarna för att visa det aritmetiska medelvärdet istället. Det alternativa värdet är tillgängligt som attribut.",
|
"long_description": "Visar genomsnittspriset per kWh för nuvarande dag från ditt Tibber-abonnemang",
|
||||||
"usage_tips": "Använd detta som baslinje för att jämföra nuvarande priser. För beräkningar använd: {{ state_attr('sensor.average_price_today', 'price_mean') }}"
|
"usage_tips": "Använd detta som baslinje för att jämföra nuvarande priser"
|
||||||
},
|
},
|
||||||
"lowest_price_tomorrow": {
|
"lowest_price_tomorrow": {
|
||||||
"description": "Det lägsta elpriset för imorgon per kWh",
|
"description": "Det lägsta elpriset för imorgon per kWh",
|
||||||
|
|
@ -73,9 +60,9 @@
|
||||||
"usage_tips": "Använd detta för att undvika att köra apparater under morgondagens topppristider. Användbart för att planera runt dyra perioder."
|
"usage_tips": "Använd detta för att undvika att köra apparater under morgondagens topppristider. Användbart för att planera runt dyra perioder."
|
||||||
},
|
},
|
||||||
"average_price_tomorrow": {
|
"average_price_tomorrow": {
|
||||||
"description": "Typiskt elpris för imorgon per kWh (konfigurerbart visningsformat)",
|
"description": "Det genomsnittliga elpriset för imorgon per kWh",
|
||||||
"long_description": "Visar priset per kWh för morgondagen från ditt Tibber-abonnemang. **Som standard visar statusen medianen** (motståndskraftig mot extrema prispikar). Du kan ändra detta i integrationsinstllningarna för att visa det aritmetiska medelvärdet istället. Det alternativa värdet är tillgängligt som attribut. Denna sensor blir otillgänglig tills morgondagens data publiceras av Tibber (vanligtvis runt 13:00-14:00 CET).",
|
"long_description": "Visar genomsnittspriset per kWh för morgondagen från ditt Tibber-abonnemang. Denna sensor blir otillgänglig tills morgondagens data publiceras av Tibber (vanligtvis runt 13:00-14:00 CET).",
|
||||||
"usage_tips": "Använd detta som baslinje för att jämföra morgondagens priser och planera konsumtion. Jämför med dagens median för att se om morgondagen kommer att bli dyrare eller billigare totalt sett."
|
"usage_tips": "Använd detta som baslinje för att jämföra morgondagens priser och planera konsumtion. Jämför med dagens genomsnitt för att se om morgondagen kommer att bli dyrare eller billigare totalt sett."
|
||||||
},
|
},
|
||||||
"yesterday_price_level": {
|
"yesterday_price_level": {
|
||||||
"description": "Aggregerad prisnivå för igår",
|
"description": "Aggregerad prisnivå för igår",
|
||||||
|
|
@ -108,14 +95,14 @@
|
||||||
"usage_tips": "Använd detta för att planera imorgonens energiförbrukning baserat på dina personliga priströskelvärden. Jämför med idag för att avgöra om du ska skjuta upp förbrukning till imorgon eller använda energi idag."
|
"usage_tips": "Använd detta för att planera imorgonens energiförbrukning baserat på dina personliga priströskelvärden. Jämför med idag för att avgöra om du ska skjuta upp förbrukning till imorgon eller använda energi idag."
|
||||||
},
|
},
|
||||||
"trailing_price_average": {
|
"trailing_price_average": {
|
||||||
"description": "Typiskt elpris för de senaste 24 timmarna per kWh (konfigurerbart visningsformat)",
|
"description": "Det genomsnittliga elpriset för de senaste 24 timmarna per kWh",
|
||||||
"long_description": "Visar priset per kWh beräknat från de senaste 24 timmarna. **Som standard visar statusen medianen** (motståndskraftig mot extrema prispikar, visar typisk prisnåvå). Du kan ändra detta i integrationsinstllningarna för att visa det aritmetiska medelvärdet istället. Det alternativa värdet är tillgängligt som attribut. Uppdateras var 15:e minut.",
|
"long_description": "Visar genomsnittspriset per kWh beräknat från de senaste 24 timmarna (rullande genomsnitt) från ditt Tibber-abonnemang. Detta ger ett rullande genomsnitt som uppdateras var 15:e minut baserat på historiska data.",
|
||||||
"usage_tips": "Använd statusvärdet för att se den typiska nuvarande prisnåvån. För kostnadsberäkningar använd: {{ state_attr('sensor.trailing_price_average', 'price_mean') }}"
|
"usage_tips": "Använd detta för att jämföra nuvarande priser mot senaste trender. Ett nuvarande pris som ligger väsentligt över detta genomsnitt kan indikera ett bra tillfälle att minska konsumtionen."
|
||||||
},
|
},
|
||||||
"leading_price_average": {
|
"leading_price_average": {
|
||||||
"description": "Typiskt elpris för nästa 24 timmar per kWh (konfigurerbart visningsformat)",
|
"description": "Det genomsnittliga elpriset för nästa 24 timmar per kWh",
|
||||||
"long_description": "Visar priset per kWh beräknat från nästa 24 timmar. **Som standard visar statusen medianen** (motståndskraftig mot extrema prispikar, visar förväntad prisnåvå). Du kan ändra detta i integrationsinstllningarna för att visa det aritmetiska medelvärdet istället. Det alternativa värdet är tillgängligt som attribut.",
|
"long_description": "Visar genomsnittspriset per kWh beräknat från nästa 24 timmar (framåtblickande genomsnitt) från ditt Tibber-abonnemang. Detta ger ett framåtblickande genomsnitt baserat på tillgängliga prognosdata.",
|
||||||
"usage_tips": "Använd statusvärdet för att se den typiska kommande prisnåvån. För kostnadsberäkningar använd: {{ state_attr('sensor.leading_price_average', 'price_mean') }}"
|
"usage_tips": "Använd detta för att planera energianvändning. Om nuvarande pris är under det framåtblickande genomsnittet kan det vara ett bra tillfälle att köra energikrävande apparater."
|
||||||
},
|
},
|
||||||
"trailing_price_min": {
|
"trailing_price_min": {
|
||||||
"description": "Det minsta elpriset för de senaste 24 timmarna per kWh",
|
"description": "Det minsta elpriset för de senaste 24 timmarna per kWh",
|
||||||
|
|
@ -292,74 +279,64 @@
|
||||||
"long_description": "Visar tidsstämpeln för det senaste tillgängliga prisdataintervallet från ditt Tibber-abonnemang"
|
"long_description": "Visar tidsstämpeln för det senaste tillgängliga prisdataintervallet från ditt Tibber-abonnemang"
|
||||||
},
|
},
|
||||||
"today_volatility": {
|
"today_volatility": {
|
||||||
"description": "Hur mycket elpriserna varierar idag",
|
"description": "Prisvolatilitetsklassificering för idag",
|
||||||
"long_description": "Visar om dagens priser är stabila eller har stora svängningar. Låg volatilitet innebär ganska jämna priser – timing spelar liten roll. Hög volatilitet innebär tydliga prisskillnader under dagen – bra tillfälle att flytta förbrukning till billigare perioder. `price_coefficient_variation_%` visar procentvärdet, `price_spread` visar den absoluta prisspannet.",
|
"long_description": "Visar hur mycket elpriserna varierar under dagen baserat på spridningen (skillnaden mellan högsta och lägsta pris). Klassificering: LÅG = spridning < 5 öre, MÅTTLIG = 5-15 öre, HÖG = 15-30 öre, MYCKET HÖG = >30 öre.",
|
||||||
"usage_tips": "Använd detta för att avgöra om optimering är värt besväret. Vid låg volatilitet kan du köra enheter när som helst. Vid hög volatilitet sparar du märkbart genom att följa Best Price-perioder."
|
"usage_tips": "Använd detta för att avgöra om prisbaserad optimering är värt besväret. Till exempel, med ett balkongbatteri som har 15% effektivitetsförlust är optimering endast meningsfull när volatiliteten är åtminstone MÅTTLIG. Skapa automationer som kontrollerar volatiliteten innan laddnings-/urladdningscykler planeras."
|
||||||
},
|
},
|
||||||
"tomorrow_volatility": {
|
"tomorrow_volatility": {
|
||||||
"description": "Hur mycket elpriserna kommer att variera i morgon",
|
"description": "Prisvolatilitetsklassificering för imorgon",
|
||||||
"long_description": "Visar om priserna i morgon blir stabila eller får stora svängningar. Tillgänglig när morgondagens data är publicerad (vanligen 13:00–14:00 CET). Låg volatilitet innebär ganska jämna priser – timing är inte kritisk. Hög volatilitet innebär tydliga prisskillnader under dagen – bra läge att planera energikrävande uppgifter. `price_coefficient_variation_%` visar procentvärdet, `price_spread` visar den absoluta prisspannet.",
|
"long_description": "Visar hur mycket elpriserna kommer att variera under morgondagen baserat på spridningen (skillnaden mellan högsta och lägsta pris). Blir otillgänglig tills morgondagens data publiceras (vanligtvis 13:00-14:00 CET).",
|
||||||
"usage_tips": "Använd för att planera morgondagens förbrukning. Hög volatilitet? Planera flexibla laster i Best Price-perioder. Låg volatilitet? Kör enheter när det passar dig."
|
"usage_tips": "Använd detta för förhandsplanering av morgondagens energianvändning. Om morgondagen har HÖG eller MYCKET HÖG volatilitet är det värt att optimera energiförbrukningstiming. Vid LÅG volatilitet kan du köra enheter när som helst utan betydande kostnadsskillnader."
|
||||||
},
|
},
|
||||||
"next_24h_volatility": {
|
"next_24h_volatility": {
|
||||||
"description": "Hur mycket priserna varierar de kommande 24 timmarna",
|
"description": "Prisvolatilitetsklassificering för rullande nästa 24 timmar",
|
||||||
"long_description": "Visar prisvolatilitet för ett rullande 24-timmarsfönster från nu (uppdateras var 15:e minut). Låg volatilitet innebär ganska jämna priser. Hög volatilitet innebär märkbara prissvängningar och därmed optimeringsmöjligheter. Till skillnad från idag/i morgon-sensorer korsar den här dagsgränser och ger en kontinuerlig framåtblickande bedömning. `price_coefficient_variation_%` visar procentvärdet, `price_spread` visar den absoluta prisspannet.",
|
"long_description": "Visar hur mycket elpriserna varierar under de nästa 24 timmarna från nu (rullande fönster). Detta korsar daggränser och uppdateras var 15:e minut, vilket ger en framåtblickande volatilitetsbedömning oberoende av kalenderdagar.",
|
||||||
"usage_tips": "Bäst för beslut i realtid. Använd vid planering av batteriladdning eller andra flexibla laster som kan gå över midnatt. Ger en konsekvent 24h-bild oberoende av kalenderdag."
|
"usage_tips": "Bästa sensorn för realtidsoptimeringsbeslut. Till skillnad från idag/imorgon-sensorer som växlar vid midnatt ger detta en kontinuerlig 24t volatilitetsbedömning. Använd för batteriladningsstrategier som sträcker sig över daggränser."
|
||||||
},
|
},
|
||||||
"today_tomorrow_volatility": {
|
"today_tomorrow_volatility": {
|
||||||
"description": "Kombinerad prisvolatilitet för idag och imorgon",
|
"description": "Kombinerad prisvolatilitetsklassificering för idag och imorgon",
|
||||||
"long_description": "Visar den samlade volatiliteten när idag och imorgon ses tillsammans (när morgondatan finns). Visar om det finns tydliga prisskillnader över dagsgränsen. Faller tillbaka till endast idag om morgondatan saknas. Nyttig för flerdagarsoptimering. `price_coefficient_variation_%` visar procentvärdet, `price_spread` visar den absoluta prisspannet.",
|
"long_description": "Visar volatilitet över både idag och imorgon kombinerat (när morgondagens data är tillgänglig). Ger en utökad vy av prisvariation över upp till 48 timmar. Faller tillbaka till endast idag när morgondagens data inte är tillgänglig ännu.",
|
||||||
"usage_tips": "Använd för uppgifter som sträcker sig över flera dagar. Kontrollera om prisskillnaderna är stora nog för att planera efter. De enskilda dag-sensorerna visar bidrag per dag om du behöver mer detaljer."
|
"usage_tips": "Använd detta för flerdagarsplanering och för att förstå om prismöjligheter existerar över dagsgränsen. Attributen 'today_volatility' och 'tomorrow_volatility' visar individuella dagsbidrag. Användbart för planering av laddningssessioner som kan sträcka sig över midnatt."
|
||||||
},
|
},
|
||||||
"data_lifecycle_status": {
|
"data_lifecycle_status": {
|
||||||
"description": "Gjeldende tilstand for prisdatalivssyklus og hurtigbufring",
|
"description": "Aktuell status för prisdatalivscykel och cachning",
|
||||||
"long_description": "Viser om integrasjonen bruker hurtigbufrede data eller ferske data fra API-et. Viser gjeldende livssyklustilstand: 'cached' (bruker lagrede data), 'fresh' (nettopp hentet fra API), 'refreshing' (henter for øyeblikket), 'searching_tomorrow' (søker aktivt etter morgendagens data etter 13:00), 'turnover_pending' (innen 15 minutter før midnatt, 23:45-00:00), eller 'error' (henting mislyktes). Inkluderer omfattende attributter som cache-alder, neste API-spørring, datafullstendighet og API-anropsstatistikk.",
|
"long_description": "Visar om integrationen använder cachad data eller färsk data från API:et. Visar aktuell livscykelstatus: 'cached' (använder lagrad data), 'fresh' (nyss hämtad från API), 'refreshing' (hämtar för närvarande), 'searching_tomorrow' (söker aktivt efter morgondagens data efter 13:00), 'turnover_pending' (inom 15 minuter före midnatt, 23:45-00:00), eller 'error' (hämtning misslyckades). Inkluderar omfattande attribut som cache-ålder, nästa API-polling, datafullständighet och API-anropsstatistik.",
|
||||||
"usage_tips": "Bruk denne diagnosesensoren for å forstå dataferskhet og API-anropsmønstre. Sjekk 'cache_age'-attributtet for å se hvor gamle de nåværende dataene er. Overvåk 'next_api_poll' for å vite når neste oppdatering er planlagt. Bruk 'data_completeness' for å se om data for i går/i dag/i morgen er tilgjengelig. 'api_calls_today'-telleren hjelper med å spore API-bruk. Perfekt for feilsøking eller forståelse av integrasjonens oppførsel."
|
"usage_tips": "Använd denna diagnostiksensor för att förstå datafärskhet och API-anropsmönster. Kontrollera 'cache_age'-attributet för att se hur gammal den aktuella datan är. Övervaka 'next_api_poll' för att veta när nästa uppdatering är schemalagd. Använd 'data_completeness' för att se om data för igår/idag/imorgon är tillgänglig. Räknaren 'api_calls_today' hjälper till att spåra API-användning. Perfekt för felsökning eller förståelse av integrationens beteende."
|
||||||
},
|
},
|
||||||
"best_price_end_time": {
|
"best_price_end_time": {
|
||||||
"description": "Total längd för nuvarande eller nästa billigperiod (state i timmar, attribut i minuter)",
|
"description": "När nuvarande eller nästa billigperiod slutar",
|
||||||
"long_description": "Visar hur länge billigperioden varar. State använder timmar (decimal) för en läsbar UI; attributet `period_duration_minutes` behåller avrundade minuter för automationer. Aktiv → varaktighet för aktuell period, annars nästa.",
|
"long_description": "Visar sluttidsstämpeln för nuvarande billigperiod när aktiv, eller slutet av nästa period när ingen period är aktiv. Visar alltid en användbar tidsreferens för planering. Returnerar 'Okänt' endast när inga perioder är konfigurerade.",
|
||||||
"usage_tips": "UI kan visa 1,5 h medan `period_duration_minutes` = 90 för automationer."
|
"usage_tips": "Använd detta för att visa en nedräkning som 'Billigperiod slutar om 2 timmar' (när aktiv) eller 'Nästa billigperiod slutar kl 14:00' (när inaktiv). Home Assistant visar automatiskt relativ tid för tidsstämpelsensorer."
|
||||||
},
|
|
||||||
"best_price_period_duration": {
|
|
||||||
"description": "Längd på nuvarande/nästa billigperiod",
|
|
||||||
"long_description": "Total längd av nuvarande eller nästa billigperiod. State visas i timmar (t.ex. 1,5 h) för enkel avläsning i UI, medan attributet `period_duration_minutes` ger samma värde i minuter (t.ex. 90) för automationer. Detta värde representerar den **fullständigt planerade längden** av perioden och är konstant under hela perioden, även när återstående tid (remaining_minutes) minskar.",
|
|
||||||
"usage_tips": "Kombinera med remaining_minutes för att beräkna när långvariga enheter ska stoppas: Perioden startade för `period_duration_minutes - remaining_minutes` minuter sedan. Detta attribut stöder energioptimeringsstrategier genom att hjälpa till med att planera högförbruksaktiviteter inom billiga perioder."
|
|
||||||
},
|
},
|
||||||
"best_price_remaining_minutes": {
|
"best_price_remaining_minutes": {
|
||||||
"description": "Tid kvar i nuvarande billigperiod",
|
"description": "Återstående minuter i nuvarande billigperiod (0 när inaktiv)",
|
||||||
"long_description": "Visar hur mycket tid som återstår i nuvarande billigperiod. State visas i timmar (t.ex. 0,75 h) för enkel avläsning i instrumentpaneler, medan attributet `remaining_minutes` ger samma tid i minuter (t.ex. 45) för automationsvillkor. **Nedräkningstimer**: Detta värde minskar varje minut under en aktiv period. Returnerar 0 när ingen billigperiod är aktiv. Uppdateras varje minut.",
|
"long_description": "Visar hur många minuter som återstår i nuvarande billigperiod. Returnerar 0 när ingen period är aktiv. Uppdateras varje minut. Kontrollera binary_sensor.best_price_period för att se om en period är aktiv.",
|
||||||
"usage_tips": "För automationer: Använd attribut `remaining_minutes` som 'Om remaining_minutes > 60, starta diskmaskin nu (tillräckligt med tid för att slutföra)' eller 'Om remaining_minutes < 15, avsluta nuvarande cykel snart'. UI visar användarvänliga timmar (t.ex. 1,25 h). Värde 0 indikerar ingen aktiv billigperiod."
|
"usage_tips": "Perfekt för automationer: 'Om remaining_minutes > 0 OCH remaining_minutes < 30, starta tvättmaskin nu'. Värdet 0 gör det enkelt att kontrollera om en period är aktiv (värde > 0) eller inte (värde = 0)."
|
||||||
},
|
},
|
||||||
"best_price_progress": {
|
"best_price_progress": {
|
||||||
"description": "Framsteg genom nuvarande billigperiod (0% när inaktiv)",
|
"description": "Framsteg genom nuvarande billigperiod (0% när inaktiv)",
|
||||||
"long_description": "Visar framsteg genom nuvarande billigperiod som 0-100%. Returnerar 0% när ingen period är aktiv. Uppdateras varje minut. 0% betyder att perioden just startade, 100% betyder att den snart slutar.",
|
"long_description": "Visar framsteg genom nuvarande billigperiod som 0-100%. Returnerar 0% när ingen period är aktiv. Uppdateras varje minut. 0% betyder period just startad, 100% betyder den snart slutar.",
|
||||||
"usage_tips": "Perfekt för visuella framstegsindikatorer. Använd i automationer: 'Om progress > 0 OCH progress > 75, skicka avisering om att billigperioden snart slutar'. Värde 0 indikerar ingen aktiv period."
|
"usage_tips": "Bra för visuella framstegsstaplar. Använd i automationer: 'Om progress > 0 OCH progress > 75, skicka meddelande att billigperiod snart slutar'. Värde 0 indikerar ingen aktiv period."
|
||||||
},
|
},
|
||||||
"best_price_next_start_time": {
|
"best_price_next_start_time": {
|
||||||
"description": "Total längd för nuvarande eller nästa dyrperiod (state i timmar, attribut i minuter)",
|
"description": "När nästa billigperiod startar",
|
||||||
"long_description": "Visar hur länge den dyra perioden varar. State använder timmar (decimal) för UI; attributet `period_duration_minutes` behåller avrundade minuter för automationer. Aktiv → varaktighet för aktuell period, annars nästa.",
|
"long_description": "Visar när nästa kommande billigperiod startar. Under en aktiv period visar detta starten av NÄSTA period efter den nuvarande. Returnerar 'Okänt' endast när inga framtida perioder är konfigurerade.",
|
||||||
"usage_tips": "UI kan visa 0,75 h medan `period_duration_minutes` = 45 för automationer."
|
"usage_tips": "Alltid användbart för framåtplanering: 'Nästa billigperiod startar om 3 timmar' (oavsett om du är i en period nu eller inte). Kombinera med automationer: 'När nästa starttid är om 10 minuter, skicka meddelande för att förbereda tvättmaskin'."
|
||||||
},
|
},
|
||||||
"best_price_next_in_minutes": {
|
"best_price_next_in_minutes": {
|
||||||
"description": "Tid kvar i nuvarande dyrperiod (state i timmar, attribut i minuter)",
|
"description": "Minuter tills nästa billigperiod startar (0 vid övergång)",
|
||||||
"long_description": "Visar hur mycket tid som återstår. State använder timmar (decimal); attributet `remaining_minutes` behåller avrundade minuter för automationer. Returnerar 0 när ingen period är aktiv. Uppdateras varje minut.",
|
"long_description": "Visar minuter tills nästa billigperiod startar. Under en aktiv period visar detta tiden till perioden EFTER den nuvarande. Returnerar 0 under korta övergångsmoment. Uppdateras varje minut.",
|
||||||
"usage_tips": "Använd `remaining_minutes` för trösklar (t.ex. > 60) medan state är lätt att läsa i timmar."
|
"usage_tips": "Perfekt för 'vänta tills billigperiod' automationer: 'Om next_in_minutes > 0 OCH next_in_minutes < 15, vänta innan diskmaskin startas'. Värde > 0 indikerar alltid att en framtida period är planerad."
|
||||||
},
|
},
|
||||||
"peak_price_end_time": {
|
"peak_price_end_time": {
|
||||||
"description": "Tid tills nästa dyrperiod startar (state i timmar, attribut i minuter)",
|
"description": "När nuvarande eller nästa dyrperiod slutar",
|
||||||
"long_description": "Visar hur länge tills nästa dyrperiod startar. State använder timmar (decimal); attributet `next_in_minutes` behåller avrundade minuter för automationer. Under en aktiv period visar detta tiden till perioden efter den aktuella. 0 under korta övergångar. Uppdateras varje minut.",
|
"long_description": "Visar sluttidsstämpeln för nuvarande dyrperiod när aktiv, eller slutet av nästa period när ingen period är aktiv. Visar alltid en användbar tidsreferens för planering. Returnerar 'Okänt' endast när inga perioder är konfigurerade.",
|
||||||
"usage_tips": "Använd `next_in_minutes` i automationer (t.ex. < 10) medan state är lätt att läsa i timmar."
|
"usage_tips": "Använd detta för att visa 'Dyrperiod slutar om 1 timme' (när aktiv) eller 'Nästa dyrperiod slutar kl 18:00' (när inaktiv). Kombinera med automationer för att återuppta drift efter topp."
|
||||||
},
|
|
||||||
"peak_price_period_duration": {
|
|
||||||
"description": "Längd på nuvarande/nästa dyrperiod",
|
|
||||||
"long_description": "Total längd av nuvarande eller nästa dyrperiod. State visas i timmar (t.ex. 1,5 h) för enkel avläsning i UI, medan attributet `period_duration_minutes` ger samma värde i minuter (t.ex. 90) för automationer. Detta värde representerar den **fullständigt planerade längden** av perioden och är konstant under hela perioden, även när återstående tid (remaining_minutes) minskar.",
|
|
||||||
"usage_tips": "Kombinera med remaining_minutes för att beräkna när långvariga enheter ska stoppas: Perioden startade för `period_duration_minutes - remaining_minutes` minuter sedan. Detta attribut stöder energibesparingsstrategier genom att hjälpa till med att planera högförbruksaktiviteter utanför dyra perioder."
|
|
||||||
},
|
},
|
||||||
"peak_price_remaining_minutes": {
|
"peak_price_remaining_minutes": {
|
||||||
"description": "Tid kvar i nuvarande dyrperiod",
|
"description": "Återstående minuter i nuvarande dyrperiod (0 när inaktiv)",
|
||||||
"long_description": "Visar hur mycket tid som återstår i nuvarande dyrperiod. State visas i timmar (t.ex. 0,75 h) för enkel avläsning i instrumentpaneler, medan attributet `remaining_minutes` ger samma tid i minuter (t.ex. 45) för automationsvillkor. **Nedräkningstimer**: Detta värde minskar varje minut under en aktiv period. Returnerar 0 när ingen dyrperiod är aktiv. Uppdateras varje minut.",
|
"long_description": "Visar hur många minuter som återstår i nuvarande dyrperiod. Returnerar 0 när ingen period är aktiv. Uppdateras varje minut. Kontrollera binary_sensor.peak_price_period för att se om en period är aktiv.",
|
||||||
"usage_tips": "För automationer: Använd attribut `remaining_minutes` som 'Om remaining_minutes > 60, avbryt uppskjuten laddningssession' eller 'Om remaining_minutes < 15, återuppta normal drift snart'. UI visar användarvänliga timmar (t.ex. 1,0 h). Värde 0 indikerar ingen aktiv dyrperiod."
|
"usage_tips": "Använd i automationer: 'Om remaining_minutes > 60, avbryt uppskjuten laddningssession'. Värde 0 gör det enkelt att skilja mellan aktiva (värde > 0) och inaktiva (värde = 0) perioder."
|
||||||
},
|
},
|
||||||
"peak_price_progress": {
|
"peak_price_progress": {
|
||||||
"description": "Framsteg genom nuvarande dyrperiod (0% när inaktiv)",
|
"description": "Framsteg genom nuvarande dyrperiod (0% när inaktiv)",
|
||||||
|
|
@ -372,9 +349,19 @@
|
||||||
"usage_tips": "Alltid användbart för planering: 'Nästa dyrperiod startar om 2 timmar'. Automation: 'När nästa starttid är om 30 minuter, minska värmetemperatur förebyggande'."
|
"usage_tips": "Alltid användbart för planering: 'Nästa dyrperiod startar om 2 timmar'. Automation: 'När nästa starttid är om 30 minuter, minska värmetemperatur förebyggande'."
|
||||||
},
|
},
|
||||||
"peak_price_next_in_minutes": {
|
"peak_price_next_in_minutes": {
|
||||||
"description": "Tid till nästa dyrperiod",
|
"description": "Minuter tills nästa dyrperiod startar (0 vid övergång)",
|
||||||
"long_description": "Visar hur länge till nästa dyrperiod. State visas i timmar (t.ex. 0,5 h) för instrumentpaneler, medan attributet `next_in_minutes` ger minuter (t.ex. 30) för automationsvillkor. Under en aktiv period visar detta tiden till perioden EFTER den nuvarande. Returnerar 0 under korta övergångsmoment. Uppdateras varje minut.",
|
"long_description": "Visar minuter tills nästa dyrperiod startar. Under en aktiv period visar detta tiden till perioden EFTER den nuvarande. Returnerar 0 under korta övergångsmoment. Uppdateras varje minut.",
|
||||||
"usage_tips": "För automationer: Använd attribut `next_in_minutes` som 'Om next_in_minutes > 0 OCH next_in_minutes < 10, slutför nuvarande laddcykel nu innan priserna ökar'. Värde > 0 indikerar alltid att en framtida dyrperiod är planerad."
|
"usage_tips": "Förebyggande automation: 'Om next_in_minutes > 0 OCH next_in_minutes < 10, slutför nuvarande laddcykel nu innan priserna ökar'."
|
||||||
|
},
|
||||||
|
"best_price_period_duration": {
|
||||||
|
"description": "Total längd på nuvarande eller nästa billigperiod i minuter",
|
||||||
|
"long_description": "Visar den totala längden på billigperioden i minuter. Under en aktiv period visar detta hela längden av nuvarande period. När ingen period är aktiv visar detta längden på nästa kommande period. Exempel: '90 minuter' för en 1,5-timmars period.",
|
||||||
|
"usage_tips": "Kombinera med remaining_minutes för att planera uppgifter: 'Om duration = 120 OCH remaining_minutes > 90, starta tvättmaskin (tillräckligt med tid för att slutföra)'. Användbart för att förstå om perioder är tillräckligt långa för energikrävande uppgifter."
|
||||||
|
},
|
||||||
|
"peak_price_period_duration": {
|
||||||
|
"description": "Total längd på nuvarande eller nästa dyrperiod i minuter",
|
||||||
|
"long_description": "Visar den totala längden på dyrperioden i minuter. Under en aktiv period visar detta hela längden av nuvarande period. När ingen period är aktiv visar detta längden på nästa kommande period. Exempel: '60 minuter' för en 1-timmars period.",
|
||||||
|
"usage_tips": "Använd för att planera energisparåtgärder: 'Om duration > 120, minska värmetemperatur mer aggressivt (lång dyr period)'. Hjälper till att bedöma hur mycket energiförbrukning måste minskas."
|
||||||
},
|
},
|
||||||
"home_type": {
|
"home_type": {
|
||||||
"description": "Bostadstyp (lägenhet, hus osv.)",
|
"description": "Bostadstyp (lägenhet, hus osv.)",
|
||||||
|
|
@ -447,14 +434,9 @@
|
||||||
"usage_tips": "Använd detta för att övervaka din abonnemangsstatus. Ställ in varningar om statusen ändras från 'Aktiv' för att säkerställa oavbruten service."
|
"usage_tips": "Använd detta för att övervaka din abonnemangsstatus. Ställ in varningar om statusen ändras från 'Aktiv' för att säkerställa oavbruten service."
|
||||||
},
|
},
|
||||||
"chart_data_export": {
|
"chart_data_export": {
|
||||||
"description": "Dataexport för dashboard-integrationer",
|
"description": "Dataexport för instrumentpanelsintegrationer",
|
||||||
"long_description": "Denna sensor anropar get_chartdata-tjänsten med din konfigurerade YAML-konfiguration och exponerar resultatet som entitetsattribut. Statusen visar 'ready' när data är tillgänglig, 'error' vid fel, eller 'pending' före första anropet. Perfekt för dashboard-integrationer som ApexCharts som behöver läsa prisdata från entitetsattribut.",
|
"long_description": "Denna sensor anropar get_chartdata-tjänsten med din konfigurerade YAML-konfiguration och exponerar resultatet som entitetsattribut. Statusen visar 'ready' när data är tillgänglig, 'error' vid fel, eller 'pending' före första anropet. Perfekt för instrumentpanelsintegrationer som ApexCharts som behöver läsa prisdata från entitetsattribut.",
|
||||||
"usage_tips": "Konfigurera YAML-parametrarna i integrationsalternativen för att matcha ditt get_chartdata-tjänstanrop. Sensorn uppdateras automatiskt när prisdata uppdateras (vanligtvis efter midnatt och när morgondagens data anländer). Få tillgång till tjänstesvarsdata direkt från entitetens attribut - strukturen matchar exakt vad get_chartdata returnerar."
|
"usage_tips": "Konfigurera YAML-parametrarna i integrationsinställningarna för att matcha ditt get_chartdata-tjänsteanrop. Sensorn uppdateras automatiskt när prisdata uppdateras (vanligtvis efter midnatt och när morgondagens data anländer). Få åtkomst till tjänstesvarsdata direkt från entitetens attribut - strukturen matchar exakt vad get_chartdata returnerar."
|
||||||
},
|
|
||||||
"chart_metadata": {
|
|
||||||
"description": "Lättviktig metadata för diagramkonfiguration",
|
|
||||||
"long_description": "Tillhandahåller väsentliga diagramkonfigurationsvärden som sensorattribut. Användbart för vilket diagramkort som helst som behöver Y-axelgränser. Sensorn anropar get_chartdata med endast-metadata-läge (ingen databehandling) och extraherar: yaxis_min, yaxis_max (föreslagen Y-axelomfång för optimal skalning). Statusen återspeglar tjänstanropsresultatet: 'ready' vid framgång, 'error' vid fel, 'pending' under initialisering.",
|
|
||||||
"usage_tips": "Konfigurera via configuration.yaml under tibber_prices.chart_metadata_config (valfritt: day, subunit_currency, resolution). Sensorn uppdateras automatiskt vid pris dataändringar. Få tillgång till metadata från attribut: yaxis_min, yaxis_max. Använd med config-template-card eller vilket verktyg som helst som läser entitetsattribut - perfekt för dynamisk diagramkonfiguration utan manuella beräkningar."
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"binary_sensor": {
|
"binary_sensor": {
|
||||||
|
|
@ -487,80 +469,11 @@
|
||||||
"description": "Om realtidsförbrukningsövervakning är aktiv",
|
"description": "Om realtidsförbrukningsövervakning är aktiv",
|
||||||
"long_description": "Indikerar om realtidsövervakning av elförbrukning är aktiverad och aktiv för ditt Tibber-hem. Detta kräver kompatibel mätutrustning (t.ex. Tibber Pulse) och en aktiv prenumeration.",
|
"long_description": "Indikerar om realtidsövervakning av elförbrukning är aktiverad och aktiv för ditt Tibber-hem. Detta kräver kompatibel mätutrustning (t.ex. Tibber Pulse) och en aktiv prenumeration.",
|
||||||
"usage_tips": "Använd detta för att verifiera att realtidsförbrukningen är tillgänglig. Aktivera meddelanden om detta oväntat ändras till 'av', vilket indikerar potentiella hårdvaru- eller anslutningsproblem."
|
"usage_tips": "Använd detta för att verifiera att realtidsförbrukningen är tillgänglig. Aktivera meddelanden om detta oväntat ändras till 'av', vilket indikerar potentiella hårdvaru- eller anslutningsproblem."
|
||||||
}
|
|
||||||
},
|
|
||||||
"number": {
|
|
||||||
"best_price_flex_override": {
|
|
||||||
"description": "Maximal procent över daglig minimumpris som intervaller kan ha och fortfarande kvalificera som 'bästa pris'. Rekommenderas: 15-20 med lättnad aktiverad (standard), eller 25-35 utan lättnad. Maximum: 50 (hårt tak för tillförlitlig perioddetektering).",
|
|
||||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Flexibilitet'-inställningen från alternativ-dialogen för bästa pris-periodberäkningar.",
|
|
||||||
"usage_tips": "Aktivera denna entitet för att dynamiskt justera bästa pris-detektering via automatiseringar, t.ex. högre flexibilitet för kritiska laster eller striktare krav för flexibla apparater."
|
|
||||||
},
|
},
|
||||||
"best_price_min_distance_override": {
|
"chart_data_export": {
|
||||||
"description": "Minsta procentuella avstånd under dagligt genomsnitt. Intervaller måste vara så långt under genomsnittet för att kvalificera som 'bästa pris'. Hjälper att skilja äkta lågprisperioder från genomsnittspriser.",
|
"description": "Dataexport för instrumentpanelsintegrationer",
|
||||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Minimiavstånd'-inställningen från alternativ-dialogen för bästa pris-periodberäkningar.",
|
"long_description": "Denna binär sensor anropar tjänsten get_chartdata för att exportera prissensordata i format som är kompatibelt med ApexCharts och andra instrumentpanelskomponenter. Använd denna tillsammans med custom:apexcharts-card för att visa prissensorer på din instrumentpanel.",
|
||||||
"usage_tips": "Öka värdet för striktare bästa pris-kriterier. Minska om för få perioder detekteras."
|
"usage_tips": "Konfigurera YAML-parametrarna i integrationens alternativ under 'ApexCharts-datakonfiguration'. Tjänsten kräver en giltig sensorenhet och returnerar formaterad data för kartrendring. Se dokumentationen för tillgängliga parametrar och anpassningsalternativ."
|
||||||
},
|
|
||||||
"best_price_min_period_length_override": {
|
|
||||||
"description": "Minsta periodlängd i 15-minuters intervaller. Perioder kortare än detta rapporteras inte. Exempel: 2 = minst 30 minuter.",
|
|
||||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Minsta periodlängd'-inställningen från alternativ-dialogen för bästa pris-periodberäkningar.",
|
|
||||||
"usage_tips": "Anpassa till typisk apparatkörtid: 2 (30 min) för snabbprogram, 4-8 (1-2 timmar) för normala cykler, 8+ för långa ECO-program."
|
|
||||||
},
|
|
||||||
"best_price_min_periods_override": {
|
|
||||||
"description": "Minsta antal bästa pris-perioder att hitta dagligen. När lättnad är aktiverad kommer systemet automatiskt att justera kriterierna för att uppnå detta antal.",
|
|
||||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Minsta antal perioder'-inställningen från alternativ-dialogen för bästa pris-periodberäkningar.",
|
|
||||||
"usage_tips": "Ställ in detta på antalet tidskritiska uppgifter du har dagligen. Exempel: 2 för två tvattmaskinskörningar."
|
|
||||||
},
|
|
||||||
"best_price_relaxation_attempts_override": {
|
|
||||||
"description": "Antal försök att gradvis lätta på kriterierna för att uppnå minsta periodantal. Varje försök ökar flexibiliteten med 3 procent. Vid 0 används endast baskriterier.",
|
|
||||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Lättnadsförsök'-inställningen från alternativ-dialogen för bästa pris-periodberäkningar.",
|
|
||||||
"usage_tips": "Högre värden gör perioddetektering mer adaptiv för dagar med stabila priser. Ställ in på 0 för att tvinga strikta kriterier utan lättnad."
|
|
||||||
},
|
|
||||||
"best_price_gap_count_override": {
|
|
||||||
"description": "Maximalt antal dyrare intervaller som kan tillåtas mellan billiga intervaller medan de fortfarande räknas som en sammanhängande period. Vid 0 måste billiga intervaller vara påföljande.",
|
|
||||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Glaptolerans'-inställningen från alternativ-dialogen för bästa pris-periodberäkningar.",
|
|
||||||
"usage_tips": "Öka detta för apparater med variabel last (t.ex. värmepumpar) som kan tolerera korta dyrare intervaller. Ställ in på 0 för kontinuerligt billiga perioder."
|
|
||||||
},
|
|
||||||
"peak_price_flex_override": {
|
|
||||||
"description": "Maximal procent under daglig maximumpris som intervaller kan ha och fortfarande kvalificera som 'topppris'. Samma rekommendationer som för bästa pris-flexibilitet.",
|
|
||||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Flexibilitet'-inställningen från alternativ-dialogen för topppris-periodberäkningar.",
|
|
||||||
"usage_tips": "Använd detta för att justera topppris-tröskeln vid körtid för automatiseringar som undviker förbrukning under dyra timmar."
|
|
||||||
},
|
|
||||||
"peak_price_min_distance_override": {
|
|
||||||
"description": "Minsta procentuella avstånd över dagligt genomsnitt. Intervaller måste vara så långt över genomsnittet för att kvalificera som 'topppris'.",
|
|
||||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Minimiavstånd'-inställningen från alternativ-dialogen för topppris-periodberäkningar.",
|
|
||||||
"usage_tips": "Öka värdet för att endast fånga extrema pristoppar. Minska för att inkludera fler högpristider."
|
|
||||||
},
|
|
||||||
"peak_price_min_period_length_override": {
|
|
||||||
"description": "Minsta periodlängd i 15-minuters intervaller för topppriser. Kortare pristoppar rapporteras inte som perioder.",
|
|
||||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Minsta periodlängd'-inställningen från alternativ-dialogen för topppris-periodberäkningar.",
|
|
||||||
"usage_tips": "Kortare värden fångar korta pristoppar. Längre värden fokuserar på ihållande högprisperioder."
|
|
||||||
},
|
|
||||||
"peak_price_min_periods_override": {
|
|
||||||
"description": "Minsta antal topppris-perioder att hitta dagligen.",
|
|
||||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Minsta antal perioder'-inställningen från alternativ-dialogen för topppris-periodberäkningar.",
|
|
||||||
"usage_tips": "Ställ in detta baserat på hur många högprisperioder du vill fånga per dag för automatiseringar."
|
|
||||||
},
|
|
||||||
"peak_price_relaxation_attempts_override": {
|
|
||||||
"description": "Antal försök att lätta på kriterierna för att uppnå minsta antal topppris-perioder.",
|
|
||||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Lättnadsförsök'-inställningen från alternativ-dialogen för topppris-periodberäkningar.",
|
|
||||||
"usage_tips": "Öka detta om inga perioder hittas på dagar med stabila priser. Ställ in på 0 för att tvinga strikta kriterier."
|
|
||||||
},
|
|
||||||
"peak_price_gap_count_override": {
|
|
||||||
"description": "Maximalt antal billigare intervaller som kan tillåtas mellan dyra intervaller medan de fortfarande räknas som en topppris-period.",
|
|
||||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Glaptolerans'-inställningen från alternativ-dialogen för topppris-periodberäkningar.",
|
|
||||||
"usage_tips": "Högre värden fångar längre högprisperioder även med korta prisdipp. Ställ in på 0 för strikt sammanhängande topppriser."
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"switch": {
|
|
||||||
"best_price_enable_relaxation_override": {
|
|
||||||
"description": "När aktiverad lättas kriterierna automatiskt för att uppnå minsta periodantal. När inaktiverad rapporteras endast perioder som uppfyller strikta kriterier (möjligen noll perioder på dagar med stabila priser).",
|
|
||||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Uppnå minimiantal'-inställningen från alternativ-dialogen för bästa pris-periodberäkningar.",
|
|
||||||
"usage_tips": "Aktivera detta för garanterade dagliga automatiseringsmöjligheter. Inaktivera om du endast vill ha riktigt billiga perioder, även om det innebär inga perioder vissa dagar."
|
|
||||||
},
|
|
||||||
"peak_price_enable_relaxation_override": {
|
|
||||||
"description": "När aktiverad lättas kriterierna automatiskt för att uppnå minsta periodantal. När inaktiverad rapporteras endast äkta pristoppar.",
|
|
||||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Uppnå minimiantal'-inställningen från alternativ-dialogen för topppris-periodberäkningar.",
|
|
||||||
"usage_tips": "Aktivera detta för konsekventa topppris-varningar. Inaktivera för att endast fånga extrema pristoppar."
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"home_types": {
|
"home_types": {
|
||||||
|
|
|
||||||
|
|
@ -70,7 +70,7 @@ async def async_get_config_entry_diagnostics(
|
||||||
},
|
},
|
||||||
"cache_status": {
|
"cache_status": {
|
||||||
"user_data_cached": coordinator._cached_user_data is not None, # noqa: SLF001
|
"user_data_cached": coordinator._cached_user_data is not None, # noqa: SLF001
|
||||||
"has_price_data": coordinator.data is not None and "priceInfo" in (coordinator.data or {}),
|
"price_data_cached": coordinator._cached_price_data is not None, # noqa: SLF001
|
||||||
"transformer_cache_valid": coordinator._data_transformer._cached_transformed_data is not None, # noqa: SLF001
|
"transformer_cache_valid": coordinator._data_transformer._cached_transformed_data is not None, # noqa: SLF001
|
||||||
"period_calculator_cache_valid": coordinator._period_calculator._cached_periods is not None, # noqa: SLF001
|
"period_calculator_cache_valid": coordinator._period_calculator._cached_periods is not None, # noqa: SLF001
|
||||||
},
|
},
|
||||||
|
|
|
||||||
|
|
@ -44,22 +44,6 @@ class TibberPricesEntity(CoordinatorEntity[TibberPricesDataUpdateCoordinator]):
|
||||||
configuration_url="https://developer.tibber.com/explorer",
|
configuration_url="https://developer.tibber.com/explorer",
|
||||||
)
|
)
|
||||||
|
|
||||||
@property
|
|
||||||
def available(self) -> bool:
|
|
||||||
"""
|
|
||||||
Return if entity is available.
|
|
||||||
|
|
||||||
Entity is unavailable when:
|
|
||||||
- Coordinator has not completed first update (no data yet)
|
|
||||||
- Coordinator has encountered an error (last_update_success = False)
|
|
||||||
|
|
||||||
Note: Auth failures are handled by coordinator's update method,
|
|
||||||
which raises ConfigEntryAuthFailed and triggers reauth flow.
|
|
||||||
"""
|
|
||||||
# Return False if coordinator not ready or has errors
|
|
||||||
# Return True if coordinator has data (bool conversion handles None/empty)
|
|
||||||
return self.coordinator.last_update_success and bool(self.coordinator.data)
|
|
||||||
|
|
||||||
def _get_device_info(self) -> tuple[str, str | None, str | None]:
|
def _get_device_info(self) -> tuple[str, str | None, str | None]:
|
||||||
"""Get device name, ID and type."""
|
"""Get device name, ID and type."""
|
||||||
user_profile = self.coordinator.get_user_profile()
|
user_profile = self.coordinator.get_user_profile()
|
||||||
|
|
@ -118,10 +102,8 @@ class TibberPricesEntity(CoordinatorEntity[TibberPricesDataUpdateCoordinator]):
|
||||||
return "Tibber Home", None
|
return "Tibber Home", None
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# Use 'or {}' to handle None values (API may return None during maintenance)
|
address1 = str(self.coordinator.data.get("address", {}).get("address1", ""))
|
||||||
address = self.coordinator.data.get("address") or {}
|
city = str(self.coordinator.data.get("address", {}).get("city", ""))
|
||||||
address1 = str(address.get("address1", ""))
|
|
||||||
city = str(address.get("city", ""))
|
|
||||||
app_nickname = str(self.coordinator.data.get("appNickname", ""))
|
app_nickname = str(self.coordinator.data.get("appNickname", ""))
|
||||||
home_type = str(self.coordinator.data.get("type", ""))
|
home_type = str(self.coordinator.data.get("type", ""))
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -2,7 +2,7 @@
|
||||||
Common helper functions for entities across platforms.
|
Common helper functions for entities across platforms.
|
||||||
|
|
||||||
This module provides utility functions used by both sensor and binary_sensor platforms:
|
This module provides utility functions used by both sensor and binary_sensor platforms:
|
||||||
- Price value conversion (major/subunit currency units)
|
- Price value conversion (major/minor currency units)
|
||||||
- Translation helpers (price levels, ratings)
|
- Translation helpers (price levels, ratings)
|
||||||
- Time-based calculations (rolling hour center index)
|
- Time-based calculations (rolling hour center index)
|
||||||
|
|
||||||
|
|
@ -14,52 +14,28 @@ from __future__ import annotations
|
||||||
|
|
||||||
from typing import TYPE_CHECKING
|
from typing import TYPE_CHECKING
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import get_display_unit_factor, get_price_level_translation
|
from custom_components.tibber_prices.const import get_price_level_translation
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
|
|
||||||
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
||||||
from custom_components.tibber_prices.data import TibberPricesConfigEntry
|
|
||||||
from homeassistant.config_entries import ConfigEntry
|
|
||||||
from homeassistant.core import HomeAssistant
|
from homeassistant.core import HomeAssistant
|
||||||
|
|
||||||
|
|
||||||
def get_price_value(
|
def get_price_value(price: float, *, in_euro: bool) -> float:
|
||||||
price: float,
|
|
||||||
*,
|
|
||||||
in_euro: bool | None = None,
|
|
||||||
config_entry: ConfigEntry | TibberPricesConfigEntry | None = None,
|
|
||||||
) -> float:
|
|
||||||
"""
|
"""
|
||||||
Convert price based on unit.
|
Convert price based on unit.
|
||||||
|
|
||||||
NOTE: This function supports two modes for backward compatibility:
|
|
||||||
1. Legacy mode: in_euro=True/False (hardcoded conversion)
|
|
||||||
2. New mode: config_entry (config-driven conversion)
|
|
||||||
|
|
||||||
New code should use get_display_unit_factor(config_entry) directly.
|
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
price: Price value to convert.
|
price: Price value to convert
|
||||||
in_euro: (Legacy) If True, return in base currency; if False, in subunit currency.
|
in_euro: If True, return price in euros; if False, return in cents/øre
|
||||||
config_entry: (New) Config entry to get display unit configuration.
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Price in requested unit (major or subunit currency units).
|
Price in requested unit (euros or minor currency units)
|
||||||
|
|
||||||
"""
|
"""
|
||||||
# Legacy mode: use in_euro parameter
|
return price if in_euro else round((price * 100), 2)
|
||||||
if in_euro is not None:
|
|
||||||
return price if in_euro else round(price * 100, 2)
|
|
||||||
|
|
||||||
# New mode: use config_entry
|
|
||||||
if config_entry is not None:
|
|
||||||
factor = get_display_unit_factor(config_entry)
|
|
||||||
return round(price * factor, 2)
|
|
||||||
|
|
||||||
# Fallback: default to subunit currency (backward compatibility)
|
|
||||||
return round(price * 100, 2)
|
|
||||||
|
|
||||||
|
|
||||||
def translate_level(hass: HomeAssistant, level: str) -> str:
|
def translate_level(hass: HomeAssistant, level: str) -> str:
|
||||||
|
|
|
||||||
|
|
@ -85,25 +85,19 @@ def get_dynamic_icon(
|
||||||
|
|
||||||
|
|
||||||
def get_trend_icon(key: str, value: Any) -> str | None:
|
def get_trend_icon(key: str, value: Any) -> str | None:
|
||||||
"""Get icon for trend sensors using 5-level trend scale."""
|
"""Get icon for trend sensors."""
|
||||||
# Handle next_price_trend_change TIMESTAMP sensor differently
|
# Handle next_price_trend_change TIMESTAMP sensor differently
|
||||||
# (icon based on attributes, not value which is a timestamp)
|
# (icon based on attributes, not value which is a timestamp)
|
||||||
if key == "next_price_trend_change":
|
if key == "next_price_trend_change":
|
||||||
return None # Will be handled by sensor's icon property using attributes
|
return None # Will be handled by sensor's icon property using attributes
|
||||||
|
|
||||||
if not key.startswith("price_trend_") and key != "current_price_trend":
|
if not key.startswith("price_trend_") or not isinstance(value, str):
|
||||||
return None
|
return None
|
||||||
|
|
||||||
if not isinstance(value, str):
|
|
||||||
return None
|
|
||||||
|
|
||||||
# 5-level trend icons: strongly uses double arrows, normal uses single
|
|
||||||
trend_icons = {
|
trend_icons = {
|
||||||
"strongly_rising": "mdi:chevron-double-up", # Strong upward movement
|
"rising": "mdi:trending-up",
|
||||||
"rising": "mdi:trending-up", # Normal upward trend
|
"falling": "mdi:trending-down",
|
||||||
"stable": "mdi:trending-neutral", # No significant change
|
"stable": "mdi:trending-neutral",
|
||||||
"falling": "mdi:trending-down", # Normal downward trend
|
|
||||||
"strongly_falling": "mdi:chevron-double-down", # Strong downward movement
|
|
||||||
}
|
}
|
||||||
return trend_icons.get(value)
|
return trend_icons.get(value)
|
||||||
|
|
||||||
|
|
@ -203,7 +197,7 @@ def get_price_sensor_icon(
|
||||||
return None
|
return None
|
||||||
|
|
||||||
# Only current price sensors get dynamic icons
|
# Only current price sensors get dynamic icons
|
||||||
if key in ("current_interval_price", "current_interval_price_base"):
|
if key == "current_interval_price":
|
||||||
level = get_price_level_for_icon(coordinator_data, interval_offset=0, time=time)
|
level = get_price_level_for_icon(coordinator_data, interval_offset=0, time=time)
|
||||||
if level:
|
if level:
|
||||||
return PRICE_LEVEL_CASH_ICON_MAPPING.get(level.upper())
|
return PRICE_LEVEL_CASH_ICON_MAPPING.get(level.upper())
|
||||||
|
|
|
||||||
|
|
@ -16,15 +16,7 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"get_apexcharts_yaml": {
|
"get_apexcharts_yaml": {
|
||||||
"service": "mdi:chart-line",
|
"service": "mdi:chart-line"
|
||||||
"sections": {
|
|
||||||
"entry_id": "mdi:identifier",
|
|
||||||
"day": "mdi:calendar-range",
|
|
||||||
"level_type": "mdi:format-list-bulleted-type",
|
|
||||||
"resolution": "mdi:timer-sand",
|
|
||||||
"highlight_best_price": "mdi:battery-charging-low",
|
|
||||||
"highlight_peak_price": "mdi:battery-alert"
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
"refresh_user_data": {
|
"refresh_user_data": {
|
||||||
"service": "mdi:refresh"
|
"service": "mdi:refresh"
|
||||||
|
|
|
||||||
|
|
@ -4,15 +4,10 @@ from __future__ import annotations
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
from datetime import datetime, timedelta
|
from datetime import datetime, timedelta
|
||||||
from typing import TYPE_CHECKING, Any
|
from typing import Any
|
||||||
|
|
||||||
from homeassistant.util import dt as dt_utils
|
from homeassistant.util import dt as dt_utils
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
from custom_components.tibber_prices.coordinator.time_service import (
|
|
||||||
TibberPricesTimeService,
|
|
||||||
)
|
|
||||||
|
|
||||||
_LOGGER = logging.getLogger(__name__)
|
_LOGGER = logging.getLogger(__name__)
|
||||||
_LOGGER_DETAILS = logging.getLogger(__name__ + ".details")
|
_LOGGER_DETAILS = logging.getLogger(__name__ + ".details")
|
||||||
|
|
||||||
|
|
@ -42,10 +37,9 @@ class TibberPricesIntervalPoolFetchGroupCache:
|
||||||
Protected: 2025-11-23 00:00 to 2025-11-27 00:00
|
Protected: 2025-11-23 00:00 to 2025-11-27 00:00
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, *, time_service: TibberPricesTimeService | None = None) -> None:
|
def __init__(self) -> None:
|
||||||
"""Initialize empty fetch group cache with optional TimeService."""
|
"""Initialize empty fetch group cache."""
|
||||||
self._fetch_groups: list[dict[str, Any]] = []
|
self._fetch_groups: list[dict[str, Any]] = []
|
||||||
self._time_service = time_service
|
|
||||||
|
|
||||||
# Protected range cache (invalidated daily)
|
# Protected range cache (invalidated daily)
|
||||||
self._protected_range_cache: tuple[str, str] | None = None
|
self._protected_range_cache: tuple[str, str] | None = None
|
||||||
|
|
@ -99,11 +93,6 @@ class TibberPricesIntervalPoolFetchGroupCache:
|
||||||
Protected range: day-before-yesterday 00:00 to day-after-tomorrow 00:00.
|
Protected range: day-before-yesterday 00:00 to day-after-tomorrow 00:00.
|
||||||
This range shifts daily automatically.
|
This range shifts daily automatically.
|
||||||
|
|
||||||
Time Machine Support:
|
|
||||||
If time_service was provided at init, uses time_service.now() for
|
|
||||||
"today" calculation. This protects the correct date range when
|
|
||||||
simulating a different date.
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Tuple of (start_iso, end_iso) for protected range.
|
Tuple of (start_iso, end_iso) for protected range.
|
||||||
Start is inclusive, end is exclusive.
|
Start is inclusive, end is exclusive.
|
||||||
|
|
@ -113,11 +102,10 @@ class TibberPricesIntervalPoolFetchGroupCache:
|
||||||
Protected days: 2025-11-23, 2025-11-24, 2025-11-25, 2025-11-26
|
Protected days: 2025-11-23, 2025-11-24, 2025-11-25, 2025-11-26
|
||||||
|
|
||||||
"""
|
"""
|
||||||
# Use TimeService if available (Time Machine support), else real time
|
# Check cache validity (invalidate daily)
|
||||||
now = self._time_service.now() if self._time_service else dt_utils.now()
|
now = dt_utils.now()
|
||||||
today_date_str = now.date().isoformat()
|
today_date_str = now.date().isoformat()
|
||||||
|
|
||||||
# Check cache validity (invalidate daily)
|
|
||||||
if self._protected_range_cache_date == today_date_str and self._protected_range_cache:
|
if self._protected_range_cache_date == today_date_str and self._protected_range_cache:
|
||||||
return self._protected_range_cache
|
return self._protected_range_cache
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
"""Interval fetcher - coverage check and API coordination for interval pool."""
|
"""Interval fetcher - gap detection and API coordination for interval pool."""
|
||||||
|
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
|
|
@ -38,7 +38,7 @@ TIME_TOLERANCE_MINUTES = 1
|
||||||
|
|
||||||
|
|
||||||
class TibberPricesIntervalPoolFetcher:
|
class TibberPricesIntervalPoolFetcher:
|
||||||
"""Fetch missing intervals from API based on coverage check."""
|
"""Fetch missing intervals from API based on gap detection."""
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
|
|
@ -62,14 +62,14 @@ class TibberPricesIntervalPoolFetcher:
|
||||||
self._index = index
|
self._index = index
|
||||||
self._home_id = home_id
|
self._home_id = home_id
|
||||||
|
|
||||||
def check_coverage(
|
def detect_gaps(
|
||||||
self,
|
self,
|
||||||
cached_intervals: list[dict[str, Any]],
|
cached_intervals: list[dict[str, Any]],
|
||||||
start_time_iso: str,
|
start_time_iso: str,
|
||||||
end_time_iso: str,
|
end_time_iso: str,
|
||||||
) -> list[tuple[str, str]]:
|
) -> list[tuple[str, str]]:
|
||||||
"""
|
"""
|
||||||
Check cache coverage and find missing time ranges.
|
Detect missing time ranges that need to be fetched.
|
||||||
|
|
||||||
This method minimizes API calls by:
|
This method minimizes API calls by:
|
||||||
1. Finding all gaps in cached intervals
|
1. Finding all gaps in cached intervals
|
||||||
|
|
@ -130,7 +130,7 @@ class TibberPricesIntervalPoolFetcher:
|
||||||
if time_diff_before_first > TIME_TOLERANCE_SECONDS:
|
if time_diff_before_first > TIME_TOLERANCE_SECONDS:
|
||||||
missing_ranges.append((start_time_iso, sorted_intervals[0]["startsAt"]))
|
missing_ranges.append((start_time_iso, sorted_intervals[0]["startsAt"]))
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"Missing range before first cached interval: %s to %s (%.1f seconds)",
|
"Gap before first cached interval: %s to %s (%.1f seconds)",
|
||||||
start_time_iso,
|
start_time_iso,
|
||||||
sorted_intervals[0]["startsAt"],
|
sorted_intervals[0]["startsAt"],
|
||||||
time_diff_before_first,
|
time_diff_before_first,
|
||||||
|
|
@ -163,7 +163,7 @@ class TibberPricesIntervalPoolFetcher:
|
||||||
current_interval_end = current_dt + timedelta(minutes=expected_interval_minutes)
|
current_interval_end = current_dt + timedelta(minutes=expected_interval_minutes)
|
||||||
missing_ranges.append((current_interval_end.isoformat(), next_start))
|
missing_ranges.append((current_interval_end.isoformat(), next_start))
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"Missing range between cached intervals: %s (ends at %s) to %s (%.1f min, expected %d min)",
|
"Gap between cached intervals: %s (ends at %s) to %s (%.1f min gap, expected %d min)",
|
||||||
current_start,
|
current_start,
|
||||||
current_interval_end.isoformat(),
|
current_interval_end.isoformat(),
|
||||||
next_start,
|
next_start,
|
||||||
|
|
@ -190,7 +190,7 @@ class TibberPricesIntervalPoolFetcher:
|
||||||
# Missing range starts AFTER the last cached interval ends
|
# Missing range starts AFTER the last cached interval ends
|
||||||
missing_ranges.append((last_interval_end_dt.isoformat(), end_time_iso))
|
missing_ranges.append((last_interval_end_dt.isoformat(), end_time_iso))
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"Missing range after last cached interval: %s (ends at %s) to %s (%.1f seconds, need >= %d)",
|
"Gap after last cached interval: %s (ends at %s) to %s (%.1f seconds, need >= %d)",
|
||||||
sorted_intervals[-1]["startsAt"],
|
sorted_intervals[-1]["startsAt"],
|
||||||
last_interval_end_dt.isoformat(),
|
last_interval_end_dt.isoformat(),
|
||||||
end_time_iso,
|
end_time_iso,
|
||||||
|
|
@ -200,7 +200,7 @@ class TibberPricesIntervalPoolFetcher:
|
||||||
|
|
||||||
if not missing_ranges:
|
if not missing_ranges:
|
||||||
_LOGGER.debug(
|
_LOGGER.debug(
|
||||||
"Full coverage - all intervals cached for range %s to %s",
|
"No gaps detected - all intervals cached for range %s to %s",
|
||||||
start_time_iso,
|
start_time_iso,
|
||||||
end_time_iso,
|
end_time_iso,
|
||||||
)
|
)
|
||||||
|
|
@ -285,7 +285,7 @@ class TibberPricesIntervalPoolFetcher:
|
||||||
|
|
||||||
for idx, (missing_start_iso, missing_end_iso) in enumerate(missing_ranges, start=1):
|
for idx, (missing_start_iso, missing_end_iso) in enumerate(missing_ranges, start=1):
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"Fetching from Tibber API (%d/%d) for home %s: range %s to %s",
|
"API call %d/%d for home %s: fetching range %s to %s",
|
||||||
idx,
|
idx,
|
||||||
len(missing_ranges),
|
len(missing_ranges),
|
||||||
self._home_id,
|
self._home_id,
|
||||||
|
|
@ -309,9 +309,10 @@ class TibberPricesIntervalPoolFetcher:
|
||||||
all_fetched_intervals.append(fetched_intervals)
|
all_fetched_intervals.append(fetched_intervals)
|
||||||
|
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"Received %d intervals from Tibber API for home %s",
|
"Fetched %d intervals from API for home %s (fetch time: %s)",
|
||||||
len(fetched_intervals),
|
len(fetched_intervals),
|
||||||
self._home_id,
|
self._home_id,
|
||||||
|
fetch_time_iso,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Notify callback if provided (for immediate caching)
|
# Notify callback if provided (for immediate caching)
|
||||||
|
|
|
||||||
|
|
@ -3,7 +3,6 @@
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
from datetime import datetime
|
|
||||||
from typing import TYPE_CHECKING, Any
|
from typing import TYPE_CHECKING, Any
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
|
|
@ -18,13 +17,6 @@ _LOGGER_DETAILS = logging.getLogger(__name__ + ".details")
|
||||||
MAX_CACHE_SIZE = 960
|
MAX_CACHE_SIZE = 960
|
||||||
|
|
||||||
|
|
||||||
def _normalize_starts_at(starts_at: datetime | str) -> str:
|
|
||||||
"""Normalize startsAt to consistent format (YYYY-MM-DDTHH:MM:SS)."""
|
|
||||||
if isinstance(starts_at, datetime):
|
|
||||||
return starts_at.strftime("%Y-%m-%dT%H:%M:%S")
|
|
||||||
return starts_at[:19]
|
|
||||||
|
|
||||||
|
|
||||||
class TibberPricesIntervalPoolGarbageCollector:
|
class TibberPricesIntervalPoolGarbageCollector:
|
||||||
"""
|
"""
|
||||||
Manages cache eviction and dead interval cleanup.
|
Manages cache eviction and dead interval cleanup.
|
||||||
|
|
@ -85,15 +77,6 @@ class TibberPricesIntervalPoolGarbageCollector:
|
||||||
self._home_id,
|
self._home_id,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Phase 1.5: Remove empty fetch groups (after dead interval cleanup)
|
|
||||||
empty_removed = self._remove_empty_groups(fetch_groups)
|
|
||||||
if empty_removed > 0:
|
|
||||||
_LOGGER_DETAILS.debug(
|
|
||||||
"GC removed %d empty fetch groups (home %s)",
|
|
||||||
empty_removed,
|
|
||||||
self._home_id,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Phase 2: Count total intervals after cleanup
|
# Phase 2: Count total intervals after cleanup
|
||||||
total_intervals = self._cache.count_total_intervals()
|
total_intervals = self._cache.count_total_intervals()
|
||||||
|
|
||||||
|
|
@ -111,7 +94,7 @@ class TibberPricesIntervalPoolGarbageCollector:
|
||||||
|
|
||||||
if not evicted_indices:
|
if not evicted_indices:
|
||||||
# All intervals are protected, cannot evict
|
# All intervals are protected, cannot evict
|
||||||
return dead_count > 0 or empty_removed > 0
|
return dead_count > 0
|
||||||
|
|
||||||
# Phase 4: Rebuild cache and index
|
# Phase 4: Rebuild cache and index
|
||||||
new_fetch_groups = [group for idx, group in enumerate(fetch_groups) if idx not in evicted_indices]
|
new_fetch_groups = [group for idx, group in enumerate(fetch_groups) if idx not in evicted_indices]
|
||||||
|
|
@ -127,35 +110,6 @@ class TibberPricesIntervalPoolGarbageCollector:
|
||||||
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def _remove_empty_groups(self, fetch_groups: list[dict[str, Any]]) -> int:
|
|
||||||
"""
|
|
||||||
Remove fetch groups with no intervals.
|
|
||||||
|
|
||||||
After dead interval cleanup, some groups may be completely empty.
|
|
||||||
These should be removed to prevent memory accumulation.
|
|
||||||
|
|
||||||
Note: This modifies the cache's internal list in-place and rebuilds
|
|
||||||
the index to maintain consistency.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
fetch_groups: List of fetch groups (will be modified).
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Number of empty groups removed.
|
|
||||||
|
|
||||||
"""
|
|
||||||
# Find non-empty groups
|
|
||||||
non_empty_groups = [group for group in fetch_groups if group["intervals"]]
|
|
||||||
removed_count = len(fetch_groups) - len(non_empty_groups)
|
|
||||||
|
|
||||||
if removed_count > 0:
|
|
||||||
# Update cache with filtered list
|
|
||||||
self._cache.set_fetch_groups(non_empty_groups)
|
|
||||||
# Rebuild index since group indices changed
|
|
||||||
self._index.rebuild(non_empty_groups)
|
|
||||||
|
|
||||||
return removed_count
|
|
||||||
|
|
||||||
def _cleanup_dead_intervals(self, fetch_groups: list[dict[str, Any]]) -> int:
|
def _cleanup_dead_intervals(self, fetch_groups: list[dict[str, Any]]) -> int:
|
||||||
"""
|
"""
|
||||||
Remove dead intervals from all fetch groups.
|
Remove dead intervals from all fetch groups.
|
||||||
|
|
@ -181,7 +135,7 @@ class TibberPricesIntervalPoolGarbageCollector:
|
||||||
living_intervals = []
|
living_intervals = []
|
||||||
|
|
||||||
for interval_idx, interval in enumerate(old_intervals):
|
for interval_idx, interval in enumerate(old_intervals):
|
||||||
starts_at_normalized = _normalize_starts_at(interval["startsAt"])
|
starts_at_normalized = interval["startsAt"][:19]
|
||||||
index_entry = self._index.get(starts_at_normalized)
|
index_entry = self._index.get(starts_at_normalized)
|
||||||
|
|
||||||
if index_entry is not None:
|
if index_entry is not None:
|
||||||
|
|
|
||||||
|
|
@ -93,28 +93,6 @@ class TibberPricesIntervalPoolTimestampIndex:
|
||||||
starts_at_normalized = self._normalize_timestamp(timestamp)
|
starts_at_normalized = self._normalize_timestamp(timestamp)
|
||||||
self._index.pop(starts_at_normalized, None)
|
self._index.pop(starts_at_normalized, None)
|
||||||
|
|
||||||
def update_batch(
|
|
||||||
self,
|
|
||||||
updates: list[tuple[str, int, int]],
|
|
||||||
) -> None:
|
|
||||||
"""
|
|
||||||
Update multiple index entries efficiently in a single operation.
|
|
||||||
|
|
||||||
More efficient than calling remove() + add() for each entry,
|
|
||||||
as it avoids repeated dict operations and normalization.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
updates: List of (timestamp, fetch_group_index, interval_index) tuples.
|
|
||||||
Timestamps will be normalized automatically.
|
|
||||||
|
|
||||||
"""
|
|
||||||
for timestamp, fetch_group_index, interval_index in updates:
|
|
||||||
starts_at_normalized = self._normalize_timestamp(timestamp)
|
|
||||||
self._index[starts_at_normalized] = {
|
|
||||||
"fetch_group_index": fetch_group_index,
|
|
||||||
"interval_index": interval_index,
|
|
||||||
}
|
|
||||||
|
|
||||||
def clear(self) -> None:
|
def clear(self) -> None:
|
||||||
"""Clear entire index."""
|
"""Clear entire index."""
|
||||||
self._index.clear()
|
self._index.clear()
|
||||||
|
|
|
||||||
|
|
@ -3,26 +3,21 @@
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
import asyncio
|
import asyncio
|
||||||
import contextlib
|
|
||||||
import logging
|
import logging
|
||||||
from datetime import datetime, timedelta
|
from datetime import datetime, timedelta
|
||||||
from typing import TYPE_CHECKING, Any
|
from typing import TYPE_CHECKING, Any
|
||||||
from zoneinfo import ZoneInfo
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.api.exceptions import TibberPricesApiClientError
|
from custom_components.tibber_prices.api.exceptions import TibberPricesApiClientError
|
||||||
from homeassistant.util import dt as dt_utils
|
from homeassistant.util import dt as dt_utils
|
||||||
|
|
||||||
from .cache import TibberPricesIntervalPoolFetchGroupCache
|
from .cache import TibberPricesIntervalPoolFetchGroupCache
|
||||||
from .fetcher import TibberPricesIntervalPoolFetcher
|
from .fetcher import TibberPricesIntervalPoolFetcher
|
||||||
from .garbage_collector import MAX_CACHE_SIZE, TibberPricesIntervalPoolGarbageCollector
|
from .garbage_collector import TibberPricesIntervalPoolGarbageCollector
|
||||||
from .index import TibberPricesIntervalPoolTimestampIndex
|
from .index import TibberPricesIntervalPoolTimestampIndex
|
||||||
from .storage import async_save_pool_state
|
from .storage import async_save_pool_state
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from custom_components.tibber_prices.api.client import TibberPricesApiClient
|
from custom_components.tibber_prices.api.client import TibberPricesApiClient
|
||||||
from custom_components.tibber_prices.coordinator.time_service import (
|
|
||||||
TibberPricesTimeService,
|
|
||||||
)
|
|
||||||
|
|
||||||
_LOGGER = logging.getLogger(__name__)
|
_LOGGER = logging.getLogger(__name__)
|
||||||
_LOGGER_DETAILS = logging.getLogger(__name__ + ".details")
|
_LOGGER_DETAILS = logging.getLogger(__name__ + ".details")
|
||||||
|
|
@ -35,13 +30,6 @@ INTERVAL_QUARTER_HOURLY = 15
|
||||||
DEBOUNCE_DELAY_SECONDS = 3.0
|
DEBOUNCE_DELAY_SECONDS = 3.0
|
||||||
|
|
||||||
|
|
||||||
def _normalize_starts_at(starts_at: datetime | str) -> str:
|
|
||||||
"""Normalize startsAt to consistent format (YYYY-MM-DDTHH:MM:SS)."""
|
|
||||||
if isinstance(starts_at, datetime):
|
|
||||||
return starts_at.strftime("%Y-%m-%dT%H:%M:%S")
|
|
||||||
return starts_at[:19]
|
|
||||||
|
|
||||||
|
|
||||||
class TibberPricesIntervalPool:
|
class TibberPricesIntervalPool:
|
||||||
"""
|
"""
|
||||||
High-performance interval cache manager for a single Tibber home.
|
High-performance interval cache manager for a single Tibber home.
|
||||||
|
|
@ -82,7 +70,6 @@ class TibberPricesIntervalPool:
|
||||||
api: TibberPricesApiClient,
|
api: TibberPricesApiClient,
|
||||||
hass: Any | None = None,
|
hass: Any | None = None,
|
||||||
entry_id: str | None = None,
|
entry_id: str | None = None,
|
||||||
time_service: TibberPricesTimeService | None = None,
|
|
||||||
) -> None:
|
) -> None:
|
||||||
"""
|
"""
|
||||||
Initialize interval pool manager.
|
Initialize interval pool manager.
|
||||||
|
|
@ -92,15 +79,12 @@ class TibberPricesIntervalPool:
|
||||||
api: API client for fetching intervals.
|
api: API client for fetching intervals.
|
||||||
hass: HomeAssistant instance for auto-save (optional).
|
hass: HomeAssistant instance for auto-save (optional).
|
||||||
entry_id: Config entry ID for auto-save (optional).
|
entry_id: Config entry ID for auto-save (optional).
|
||||||
time_service: TimeService for time-travel support (optional).
|
|
||||||
If None, uses real time (dt_utils.now()).
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
self._home_id = home_id
|
self._home_id = home_id
|
||||||
self._time_service = time_service
|
|
||||||
|
|
||||||
# Initialize components with dependency injection
|
# Initialize components with dependency injection
|
||||||
self._cache = TibberPricesIntervalPoolFetchGroupCache(time_service=time_service)
|
self._cache = TibberPricesIntervalPoolFetchGroupCache()
|
||||||
self._index = TibberPricesIntervalPoolTimestampIndex()
|
self._index = TibberPricesIntervalPoolTimestampIndex()
|
||||||
self._gc = TibberPricesIntervalPoolGarbageCollector(self._cache, self._index, home_id)
|
self._gc = TibberPricesIntervalPoolGarbageCollector(self._cache, self._index, home_id)
|
||||||
self._fetcher = TibberPricesIntervalPoolFetcher(api, self._cache, self._index, home_id)
|
self._fetcher = TibberPricesIntervalPoolFetcher(api, self._cache, self._index, home_id)
|
||||||
|
|
@ -118,7 +102,7 @@ class TibberPricesIntervalPool:
|
||||||
user_data: dict[str, Any],
|
user_data: dict[str, Any],
|
||||||
start_time: datetime,
|
start_time: datetime,
|
||||||
end_time: datetime,
|
end_time: datetime,
|
||||||
) -> tuple[list[dict[str, Any]], bool]:
|
) -> list[dict[str, Any]]:
|
||||||
"""
|
"""
|
||||||
Get price intervals for time range (cached + fetch missing).
|
Get price intervals for time range (cached + fetch missing).
|
||||||
|
|
||||||
|
|
@ -139,10 +123,8 @@ class TibberPricesIntervalPool:
|
||||||
end_time: End of range (exclusive, timezone-aware).
|
end_time: End of range (exclusive, timezone-aware).
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Tuple of (intervals, api_called):
|
List of price interval dicts, sorted by startsAt.
|
||||||
- intervals: List of price interval dicts, sorted by startsAt.
|
Contains ALL intervals in requested range (cached + fetched).
|
||||||
Contains ALL intervals in requested range (cached + fetched).
|
|
||||||
- api_called: True if API was called to fetch missing data, False if all from cache.
|
|
||||||
|
|
||||||
Raises:
|
Raises:
|
||||||
TibberPricesApiClientError: If API calls fail or validation errors.
|
TibberPricesApiClientError: If API calls fail or validation errors.
|
||||||
|
|
@ -171,18 +153,19 @@ class TibberPricesIntervalPool:
|
||||||
# Get cached intervals using index
|
# Get cached intervals using index
|
||||||
cached_intervals = self._get_cached_intervals(start_time_iso, end_time_iso)
|
cached_intervals = self._get_cached_intervals(start_time_iso, end_time_iso)
|
||||||
|
|
||||||
# Check coverage - find ranges not in cache
|
# Detect missing ranges
|
||||||
missing_ranges = self._fetcher.check_coverage(cached_intervals, start_time_iso, end_time_iso)
|
missing_ranges = self._fetcher.detect_gaps(cached_intervals, start_time_iso, end_time_iso)
|
||||||
|
|
||||||
if missing_ranges:
|
if missing_ranges:
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"Coverage check for home %s: %d range(s) missing - will fetch from API",
|
"Detected %d missing range(s) for home %s - will make %d API call(s)",
|
||||||
|
len(missing_ranges),
|
||||||
self._home_id,
|
self._home_id,
|
||||||
len(missing_ranges),
|
len(missing_ranges),
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"Coverage check for home %s: full coverage in cache - no API calls needed",
|
"All intervals available in cache for home %s - zero API calls needed",
|
||||||
self._home_id,
|
self._home_id,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -202,240 +185,17 @@ class TibberPricesIntervalPool:
|
||||||
# This ensures we return exactly what user requested, filtering out extra intervals
|
# This ensures we return exactly what user requested, filtering out extra intervals
|
||||||
final_result = self._get_cached_intervals(start_time_iso, end_time_iso)
|
final_result = self._get_cached_intervals(start_time_iso, end_time_iso)
|
||||||
|
|
||||||
# Track if API was called (True if any missing ranges were fetched)
|
|
||||||
api_called = len(missing_ranges) > 0
|
|
||||||
|
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"Pool returning %d intervals for home %s (from cache: %d, fetched from API: %d ranges, api_called=%s)",
|
"Interval pool returning %d intervals for home %s "
|
||||||
|
"(initially %d cached, %d API calls made, final %d after re-reading cache)",
|
||||||
len(final_result),
|
len(final_result),
|
||||||
self._home_id,
|
self._home_id,
|
||||||
len(cached_intervals),
|
len(cached_intervals),
|
||||||
len(missing_ranges),
|
len(missing_ranges),
|
||||||
api_called,
|
len(final_result),
|
||||||
)
|
)
|
||||||
|
|
||||||
return final_result, api_called
|
return final_result
|
||||||
|
|
||||||
async def get_sensor_data(
|
|
||||||
self,
|
|
||||||
api_client: TibberPricesApiClient,
|
|
||||||
user_data: dict[str, Any],
|
|
||||||
home_timezone: str | None = None,
|
|
||||||
*,
|
|
||||||
include_tomorrow: bool = True,
|
|
||||||
) -> tuple[list[dict[str, Any]], bool]:
|
|
||||||
"""
|
|
||||||
Get price intervals for sensor data (day-before-yesterday to end-of-tomorrow).
|
|
||||||
|
|
||||||
Convenience method for coordinator/sensors that need the standard 4-day window:
|
|
||||||
- Day before yesterday (for trailing 24h averages at midnight)
|
|
||||||
- Yesterday (for trailing 24h averages)
|
|
||||||
- Today (current prices)
|
|
||||||
- Tomorrow (if available in cache)
|
|
||||||
|
|
||||||
IMPORTANT - Two distinct behaviors:
|
|
||||||
1. API FETCH: Controlled by include_tomorrow flag
|
|
||||||
- include_tomorrow=False → Only fetch up to end of today (prevents API spam before 13:00)
|
|
||||||
- include_tomorrow=True → Fetch including tomorrow data
|
|
||||||
2. RETURN DATA: Always returns full protected range (including tomorrow if cached)
|
|
||||||
- This ensures cached tomorrow data is used even if include_tomorrow=False
|
|
||||||
|
|
||||||
The separation prevents the following bug:
|
|
||||||
- If include_tomorrow affected both fetch AND return, cached tomorrow data
|
|
||||||
would be lost when include_tomorrow=False, causing infinite refresh loops.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
api_client: TibberPricesApiClient instance for API calls.
|
|
||||||
user_data: User data dict containing home metadata.
|
|
||||||
home_timezone: Optional timezone string (e.g., "Europe/Berlin").
|
|
||||||
include_tomorrow: If True, fetch tomorrow's data from API. If False,
|
|
||||||
only fetch up to end of today. Default True.
|
|
||||||
DOES NOT affect returned data - always returns full range.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Tuple of (intervals, api_called):
|
|
||||||
- intervals: List of price interval dicts for the 4-day window (including any cached
|
|
||||||
tomorrow data), sorted by startsAt.
|
|
||||||
- api_called: True if API was called to fetch missing data, False if all from cache.
|
|
||||||
|
|
||||||
"""
|
|
||||||
# Determine timezone
|
|
||||||
tz_str = home_timezone
|
|
||||||
if not tz_str:
|
|
||||||
tz_str = self._extract_timezone_from_user_data(user_data)
|
|
||||||
|
|
||||||
# Calculate range in home's timezone
|
|
||||||
tz = ZoneInfo(tz_str) if tz_str else None
|
|
||||||
now = self._time_service.now() if self._time_service else dt_utils.now()
|
|
||||||
now_local = now.astimezone(tz) if tz else now
|
|
||||||
|
|
||||||
# Day before yesterday 00:00 (start) - same for both fetch and return
|
|
||||||
day_before_yesterday = (now_local - timedelta(days=2)).replace(hour=0, minute=0, second=0, microsecond=0)
|
|
||||||
|
|
||||||
# End of tomorrow (full protected range) - used for RETURN data
|
|
||||||
end_of_tomorrow = (now_local + timedelta(days=2)).replace(hour=0, minute=0, second=0, microsecond=0)
|
|
||||||
|
|
||||||
# API fetch range depends on include_tomorrow flag
|
|
||||||
if include_tomorrow:
|
|
||||||
fetch_end_time = end_of_tomorrow
|
|
||||||
fetch_desc = "end-of-tomorrow"
|
|
||||||
else:
|
|
||||||
# Only fetch up to end of today (prevents API spam before 13:00)
|
|
||||||
fetch_end_time = (now_local + timedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0)
|
|
||||||
fetch_desc = "end-of-today"
|
|
||||||
|
|
||||||
_LOGGER.debug(
|
|
||||||
"Sensor data request for home %s: fetch %s to %s (%s), return up to %s",
|
|
||||||
self._home_id,
|
|
||||||
day_before_yesterday.isoformat(),
|
|
||||||
fetch_end_time.isoformat(),
|
|
||||||
fetch_desc,
|
|
||||||
end_of_tomorrow.isoformat(),
|
|
||||||
)
|
|
||||||
|
|
||||||
# Fetch data (may be partial if include_tomorrow=False)
|
|
||||||
_intervals, api_called = await self.get_intervals(
|
|
||||||
api_client=api_client,
|
|
||||||
user_data=user_data,
|
|
||||||
start_time=day_before_yesterday,
|
|
||||||
end_time=fetch_end_time,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Return FULL protected range (including any cached tomorrow data)
|
|
||||||
# This ensures cached tomorrow data is available even when include_tomorrow=False
|
|
||||||
final_intervals = self._get_cached_intervals(
|
|
||||||
day_before_yesterday.isoformat(),
|
|
||||||
end_of_tomorrow.isoformat(),
|
|
||||||
)
|
|
||||||
|
|
||||||
return final_intervals, api_called
|
|
||||||
|
|
||||||
def get_pool_stats(self) -> dict[str, Any]:
|
|
||||||
"""
|
|
||||||
Get statistics about the interval pool.
|
|
||||||
|
|
||||||
Returns comprehensive statistics for diagnostic sensors, separated into:
|
|
||||||
- Sensor intervals (protected range: day-before-yesterday to tomorrow)
|
|
||||||
- Cache statistics (entire pool including service-requested data)
|
|
||||||
|
|
||||||
Protected Range:
|
|
||||||
The protected range covers 4 days at 15-min resolution = 384 intervals.
|
|
||||||
These intervals are never evicted by garbage collection.
|
|
||||||
|
|
||||||
Cache Fill Level:
|
|
||||||
Shows how full the cache is relative to MAX_CACHE_SIZE (960).
|
|
||||||
100% is not bad - just means we're using the available space.
|
|
||||||
GC will evict oldest non-protected intervals when limit is reached.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Dict with sensor intervals, cache stats, and timestamps.
|
|
||||||
|
|
||||||
"""
|
|
||||||
fetch_groups = self._cache.get_fetch_groups()
|
|
||||||
|
|
||||||
# === Sensor Intervals (Protected Range) ===
|
|
||||||
sensor_stats = self._get_sensor_interval_stats()
|
|
||||||
|
|
||||||
# === Cache Statistics (Entire Pool) ===
|
|
||||||
cache_total = self._index.count()
|
|
||||||
cache_limit = MAX_CACHE_SIZE
|
|
||||||
cache_fill_percent = round((cache_total / cache_limit) * 100, 1) if cache_limit > 0 else 0
|
|
||||||
cache_extra = max(0, cache_total - sensor_stats["count"]) # Intervals outside protected range
|
|
||||||
|
|
||||||
# === Timestamps ===
|
|
||||||
# Last sensor fetch (for protected range data)
|
|
||||||
last_sensor_fetch: str | None = None
|
|
||||||
oldest_interval: str | None = None
|
|
||||||
newest_interval: str | None = None
|
|
||||||
|
|
||||||
if fetch_groups:
|
|
||||||
# Find newest fetch group (most recent API call)
|
|
||||||
newest_group = max(fetch_groups, key=lambda g: g["fetched_at"])
|
|
||||||
last_sensor_fetch = newest_group["fetched_at"].isoformat()
|
|
||||||
|
|
||||||
# Find oldest and newest intervals across all fetch groups
|
|
||||||
all_timestamps = list(self._index.get_raw_index().keys())
|
|
||||||
if all_timestamps:
|
|
||||||
oldest_interval = min(all_timestamps)
|
|
||||||
newest_interval = max(all_timestamps)
|
|
||||||
|
|
||||||
return {
|
|
||||||
# Sensor intervals (protected range)
|
|
||||||
"sensor_intervals_count": sensor_stats["count"],
|
|
||||||
"sensor_intervals_expected": sensor_stats["expected"],
|
|
||||||
"sensor_intervals_has_gaps": sensor_stats["has_gaps"],
|
|
||||||
# Cache statistics
|
|
||||||
"cache_intervals_total": cache_total,
|
|
||||||
"cache_intervals_limit": cache_limit,
|
|
||||||
"cache_fill_percent": cache_fill_percent,
|
|
||||||
"cache_intervals_extra": cache_extra,
|
|
||||||
# Timestamps
|
|
||||||
"last_sensor_fetch": last_sensor_fetch,
|
|
||||||
"cache_oldest_interval": oldest_interval,
|
|
||||||
"cache_newest_interval": newest_interval,
|
|
||||||
# Fetch groups (API calls)
|
|
||||||
"fetch_groups_count": len(fetch_groups),
|
|
||||||
}
|
|
||||||
|
|
||||||
def _get_sensor_interval_stats(self) -> dict[str, Any]:
|
|
||||||
"""
|
|
||||||
Get statistics for sensor intervals (protected range).
|
|
||||||
|
|
||||||
Protected range: day-before-yesterday 00:00 to day-after-tomorrow 00:00.
|
|
||||||
Expected: 4 days * 24 hours * 4 intervals = 384 intervals.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Dict with count, expected, and has_gaps.
|
|
||||||
|
|
||||||
"""
|
|
||||||
start_iso, end_iso = self._cache.get_protected_range()
|
|
||||||
start_dt = datetime.fromisoformat(start_iso)
|
|
||||||
end_dt = datetime.fromisoformat(end_iso)
|
|
||||||
|
|
||||||
# Count expected intervals (15-min resolution)
|
|
||||||
expected_count = int((end_dt - start_dt).total_seconds() / (15 * 60))
|
|
||||||
|
|
||||||
# Count actual intervals in range
|
|
||||||
actual_count = 0
|
|
||||||
current_dt = start_dt
|
|
||||||
|
|
||||||
while current_dt < end_dt:
|
|
||||||
current_key = current_dt.isoformat()[:19]
|
|
||||||
if self._index.contains(current_key):
|
|
||||||
actual_count += 1
|
|
||||||
current_dt += timedelta(minutes=15)
|
|
||||||
|
|
||||||
return {
|
|
||||||
"count": actual_count,
|
|
||||||
"expected": expected_count,
|
|
||||||
"has_gaps": actual_count < expected_count,
|
|
||||||
}
|
|
||||||
|
|
||||||
def _has_gaps_in_protected_range(self) -> bool:
|
|
||||||
"""
|
|
||||||
Check if there are gaps in the protected date range.
|
|
||||||
|
|
||||||
Delegates to _get_sensor_interval_stats() for consistency.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
True if any gaps exist, False if protected range is complete.
|
|
||||||
|
|
||||||
"""
|
|
||||||
return self._get_sensor_interval_stats()["has_gaps"]
|
|
||||||
|
|
||||||
def _extract_timezone_from_user_data(self, user_data: dict[str, Any]) -> str | None:
|
|
||||||
"""Extract timezone for this home from user_data."""
|
|
||||||
if not user_data:
|
|
||||||
return None
|
|
||||||
|
|
||||||
viewer = user_data.get("viewer", {})
|
|
||||||
homes = viewer.get("homes", [])
|
|
||||||
|
|
||||||
for home in homes:
|
|
||||||
if home.get("id") == self._home_id:
|
|
||||||
return home.get("timeZone")
|
|
||||||
|
|
||||||
return None
|
|
||||||
|
|
||||||
def _get_cached_intervals(
|
def _get_cached_intervals(
|
||||||
self,
|
self,
|
||||||
|
|
@ -447,47 +207,30 @@ class TibberPricesIntervalPool:
|
||||||
|
|
||||||
Uses timestamp_index for O(1) lookups per timestamp.
|
Uses timestamp_index for O(1) lookups per timestamp.
|
||||||
|
|
||||||
IMPORTANT: Returns shallow copies of interval dicts to prevent external
|
|
||||||
mutations (e.g., by parse_all_timestamps()) from affecting cached data.
|
|
||||||
The Pool cache must remain immutable to ensure consistent behavior.
|
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
start_time_iso: ISO timestamp string (inclusive).
|
start_time_iso: ISO timestamp string (inclusive).
|
||||||
end_time_iso: ISO timestamp string (exclusive).
|
end_time_iso: ISO timestamp string (exclusive).
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
List of cached interval dicts in time range (may be empty or incomplete).
|
List of cached interval dicts in time range (may be empty or incomplete).
|
||||||
Sorted by startsAt timestamp. Each dict is a shallow copy.
|
Sorted by startsAt timestamp.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
# Parse query range once
|
# Parse query range once
|
||||||
start_time_dt = datetime.fromisoformat(start_time_iso)
|
start_time_dt = datetime.fromisoformat(start_time_iso)
|
||||||
end_time_dt = datetime.fromisoformat(end_time_iso)
|
end_time_dt = datetime.fromisoformat(end_time_iso)
|
||||||
|
|
||||||
# CRITICAL: Use NAIVE local timestamps for iteration.
|
|
||||||
#
|
|
||||||
# Index keys are naive local timestamps (timezone stripped via [:19]).
|
|
||||||
# When start and end span a DST transition, they have different UTC offsets
|
|
||||||
# (e.g., start=+01:00 CET, end=+02:00 CEST). Using fixed-offset datetimes
|
|
||||||
# from fromisoformat() causes the loop to compare UTC values for the end
|
|
||||||
# boundary, ending 1 hour early on spring-forward days (or 1 hour late on
|
|
||||||
# fall-back days).
|
|
||||||
#
|
|
||||||
# By iterating in naive local time, we match the index key format exactly
|
|
||||||
# and the end boundary comparison works correctly regardless of DST.
|
|
||||||
current_naive = start_time_dt.replace(tzinfo=None)
|
|
||||||
end_naive = end_time_dt.replace(tzinfo=None)
|
|
||||||
|
|
||||||
# Use index to find intervals: iterate through expected timestamps
|
# Use index to find intervals: iterate through expected timestamps
|
||||||
result = []
|
result = []
|
||||||
|
current_dt = start_time_dt
|
||||||
|
|
||||||
# Determine interval step (15 min post-2025-10-01, 60 min pre)
|
# Determine interval step (15 min post-2025-10-01, 60 min pre)
|
||||||
resolution_change_naive = datetime(2025, 10, 1) # noqa: DTZ001
|
resolution_change_dt = datetime(2025, 10, 1, tzinfo=start_time_dt.tzinfo)
|
||||||
interval_minutes = INTERVAL_QUARTER_HOURLY if current_naive >= resolution_change_naive else INTERVAL_HOURLY
|
interval_minutes = INTERVAL_QUARTER_HOURLY if current_dt >= resolution_change_dt else INTERVAL_HOURLY
|
||||||
|
|
||||||
while current_naive < end_naive:
|
while current_dt < end_time_dt:
|
||||||
# Check if this timestamp exists in index (O(1) lookup)
|
# Check if this timestamp exists in index (O(1) lookup)
|
||||||
current_dt_key = current_naive.isoformat()[:19]
|
current_dt_key = current_dt.isoformat()[:19]
|
||||||
location = self._index.get(current_dt_key)
|
location = self._index.get(current_dt_key)
|
||||||
|
|
||||||
if location is not None:
|
if location is not None:
|
||||||
|
|
@ -495,21 +238,19 @@ class TibberPricesIntervalPool:
|
||||||
fetch_groups = self._cache.get_fetch_groups()
|
fetch_groups = self._cache.get_fetch_groups()
|
||||||
fetch_group = fetch_groups[location["fetch_group_index"]]
|
fetch_group = fetch_groups[location["fetch_group_index"]]
|
||||||
interval = fetch_group["intervals"][location["interval_index"]]
|
interval = fetch_group["intervals"][location["interval_index"]]
|
||||||
# CRITICAL: Return shallow copy to prevent external mutations
|
result.append(interval)
|
||||||
# (e.g., parse_all_timestamps() converts startsAt to datetime in-place)
|
|
||||||
result.append(dict(interval))
|
|
||||||
|
|
||||||
# Move to next expected interval
|
# Move to next expected interval
|
||||||
current_naive += timedelta(minutes=interval_minutes)
|
current_dt += timedelta(minutes=interval_minutes)
|
||||||
|
|
||||||
# Handle resolution change boundary
|
# Handle resolution change boundary
|
||||||
if interval_minutes == INTERVAL_HOURLY and current_naive >= resolution_change_naive:
|
if interval_minutes == INTERVAL_HOURLY and current_dt >= resolution_change_dt:
|
||||||
interval_minutes = INTERVAL_QUARTER_HOURLY
|
interval_minutes = INTERVAL_QUARTER_HOURLY
|
||||||
|
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
"Retrieved %d intervals from cache for home %s (range %s to %s)",
|
"Cache lookup for home %s: found %d intervals in range %s to %s",
|
||||||
len(result),
|
|
||||||
self._home_id,
|
self._home_id,
|
||||||
|
len(result),
|
||||||
start_time_iso,
|
start_time_iso,
|
||||||
end_time_iso,
|
end_time_iso,
|
||||||
)
|
)
|
||||||
|
|
@ -547,7 +288,7 @@ class TibberPricesIntervalPool:
|
||||||
intervals_to_touch = []
|
intervals_to_touch = []
|
||||||
|
|
||||||
for interval in intervals:
|
for interval in intervals:
|
||||||
starts_at_normalized = _normalize_starts_at(interval["startsAt"])
|
starts_at_normalized = interval["startsAt"][:19]
|
||||||
if not self._index.contains(starts_at_normalized):
|
if not self._index.contains(starts_at_normalized):
|
||||||
new_intervals.append(interval)
|
new_intervals.append(interval)
|
||||||
else:
|
else:
|
||||||
|
|
@ -579,7 +320,7 @@ class TibberPricesIntervalPool:
|
||||||
|
|
||||||
# Update timestamp index for all new intervals
|
# Update timestamp index for all new intervals
|
||||||
for interval_index, interval in enumerate(new_intervals):
|
for interval_index, interval in enumerate(new_intervals):
|
||||||
starts_at_normalized = _normalize_starts_at(interval["startsAt"])
|
starts_at_normalized = interval["startsAt"][:19]
|
||||||
self._index.add(interval, fetch_group_index, interval_index)
|
self._index.add(interval, fetch_group_index, interval_index)
|
||||||
|
|
||||||
_LOGGER_DETAILS.debug(
|
_LOGGER_DETAILS.debug(
|
||||||
|
|
@ -631,13 +372,13 @@ class TibberPricesIntervalPool:
|
||||||
# Add touch group to cache
|
# Add touch group to cache
|
||||||
touch_group_index = self._cache.add_fetch_group(touch_intervals, fetch_time_dt)
|
touch_group_index = self._cache.add_fetch_group(touch_intervals, fetch_time_dt)
|
||||||
|
|
||||||
# Update index to point to new fetch group using batch operation
|
# Update index to point to new fetch group
|
||||||
# This is more efficient than individual remove+add calls
|
for interval_index, (starts_at_normalized, _) in enumerate(intervals_to_touch):
|
||||||
index_updates = [
|
# Remove old index entry
|
||||||
(starts_at_normalized, touch_group_index, interval_index)
|
self._index.remove(starts_at_normalized)
|
||||||
for interval_index, (starts_at_normalized, _) in enumerate(intervals_to_touch)
|
# Add new index entry pointing to touch group
|
||||||
]
|
interval = touch_intervals[interval_index]
|
||||||
self._index.update_batch(index_updates)
|
self._index.add(interval, touch_group_index, interval_index)
|
||||||
|
|
||||||
_LOGGER.debug(
|
_LOGGER.debug(
|
||||||
"Touched %d cached intervals for home %s (moved to fetch group %d, fetched at %s)",
|
"Touched %d cached intervals for home %s (moved to fetch group %d, fetched at %s)",
|
||||||
|
|
@ -678,36 +419,6 @@ class TibberPricesIntervalPool:
|
||||||
_LOGGER.debug("Auto-save timer cancelled (expected - new changes arrived)")
|
_LOGGER.debug("Auto-save timer cancelled (expected - new changes arrived)")
|
||||||
raise
|
raise
|
||||||
|
|
||||||
async def async_shutdown(self) -> None:
|
|
||||||
"""
|
|
||||||
Clean shutdown - cancel pending background tasks.
|
|
||||||
|
|
||||||
Should be called when the config entry is unloaded to prevent
|
|
||||||
orphaned tasks and ensure clean resource cleanup.
|
|
||||||
|
|
||||||
"""
|
|
||||||
_LOGGER.debug("Shutting down interval pool for home %s", self._home_id)
|
|
||||||
|
|
||||||
# Cancel debounce task if running
|
|
||||||
if self._save_debounce_task is not None and not self._save_debounce_task.done():
|
|
||||||
self._save_debounce_task.cancel()
|
|
||||||
with contextlib.suppress(asyncio.CancelledError):
|
|
||||||
await self._save_debounce_task
|
|
||||||
_LOGGER.debug("Cancelled pending auto-save task")
|
|
||||||
|
|
||||||
# Cancel any other background tasks
|
|
||||||
if self._background_tasks:
|
|
||||||
for task in list(self._background_tasks):
|
|
||||||
if not task.done():
|
|
||||||
task.cancel()
|
|
||||||
# Wait for all tasks to complete cancellation
|
|
||||||
if self._background_tasks:
|
|
||||||
await asyncio.gather(*self._background_tasks, return_exceptions=True)
|
|
||||||
_LOGGER.debug("Cancelled %d background tasks", len(self._background_tasks))
|
|
||||||
self._background_tasks.clear()
|
|
||||||
|
|
||||||
_LOGGER.debug("Interval pool shutdown complete for home %s", self._home_id)
|
|
||||||
|
|
||||||
async def _auto_save_pool_state(self) -> None:
|
async def _auto_save_pool_state(self) -> None:
|
||||||
"""Auto-save pool state to storage with lock protection."""
|
"""Auto-save pool state to storage with lock protection."""
|
||||||
if self._hass is None or self._entry_id is None:
|
if self._hass is None or self._entry_id is None:
|
||||||
|
|
@ -740,7 +451,7 @@ class TibberPricesIntervalPool:
|
||||||
living_intervals = []
|
living_intervals = []
|
||||||
|
|
||||||
for interval_idx, interval in enumerate(fetch_group["intervals"]):
|
for interval_idx, interval in enumerate(fetch_group["intervals"]):
|
||||||
starts_at_normalized = _normalize_starts_at(interval["startsAt"])
|
starts_at_normalized = interval["startsAt"][:19]
|
||||||
|
|
||||||
# Check if interval is still referenced in index
|
# Check if interval is still referenced in index
|
||||||
location = self._index.get(starts_at_normalized)
|
location = self._index.get(starts_at_normalized)
|
||||||
|
|
@ -775,7 +486,6 @@ class TibberPricesIntervalPool:
|
||||||
api: TibberPricesApiClient,
|
api: TibberPricesApiClient,
|
||||||
hass: Any | None = None,
|
hass: Any | None = None,
|
||||||
entry_id: str | None = None,
|
entry_id: str | None = None,
|
||||||
time_service: TibberPricesTimeService | None = None,
|
|
||||||
) -> TibberPricesIntervalPool | None:
|
) -> TibberPricesIntervalPool | None:
|
||||||
"""
|
"""
|
||||||
Restore interval pool manager from storage.
|
Restore interval pool manager from storage.
|
||||||
|
|
@ -788,7 +498,6 @@ class TibberPricesIntervalPool:
|
||||||
api: API client for fetching intervals.
|
api: API client for fetching intervals.
|
||||||
hass: HomeAssistant instance for auto-save (optional).
|
hass: HomeAssistant instance for auto-save (optional).
|
||||||
entry_id: Config entry ID for auto-save (optional).
|
entry_id: Config entry ID for auto-save (optional).
|
||||||
time_service: TimeService for time-travel support (optional).
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Restored TibberPricesIntervalPool instance, or None if format unknown/corrupted.
|
Restored TibberPricesIntervalPool instance, or None if format unknown/corrupted.
|
||||||
|
|
@ -808,7 +517,7 @@ class TibberPricesIntervalPool:
|
||||||
home_id = data["home_id"]
|
home_id = data["home_id"]
|
||||||
|
|
||||||
# Create manager with home_id from storage
|
# Create manager with home_id from storage
|
||||||
manager = cls(home_id=home_id, api=api, hass=hass, entry_id=entry_id, time_service=time_service)
|
manager = cls(home_id=home_id, api=api, hass=hass, entry_id=entry_id)
|
||||||
|
|
||||||
# Restore fetch groups to cache
|
# Restore fetch groups to cache
|
||||||
for serialized_group in data.get("fetch_groups", []):
|
for serialized_group in data.get("fetch_groups", []):
|
||||||
|
|
|
||||||
|
|
@ -11,5 +11,5 @@
|
||||||
"requirements": [
|
"requirements": [
|
||||||
"aiofiles>=23.2.1"
|
"aiofiles>=23.2.1"
|
||||||
],
|
],
|
||||||
"version": "0.27.0"
|
"version": "0.18.0"
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,39 +0,0 @@
|
||||||
"""
|
|
||||||
Number platform for Tibber Prices integration.
|
|
||||||
|
|
||||||
Provides configurable number entities for runtime overrides of Best Price
|
|
||||||
and Peak Price period calculation settings. These entities allow automation
|
|
||||||
of configuration parameters without using the options flow.
|
|
||||||
|
|
||||||
When enabled, these entities take precedence over the options flow settings.
|
|
||||||
When disabled (default), the options flow settings are used.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
from typing import TYPE_CHECKING
|
|
||||||
|
|
||||||
from .core import TibberPricesConfigNumber
|
|
||||||
from .definitions import NUMBER_ENTITY_DESCRIPTIONS
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
from custom_components.tibber_prices.data import TibberPricesConfigEntry
|
|
||||||
from homeassistant.core import HomeAssistant
|
|
||||||
from homeassistant.helpers.entity_platform import AddEntitiesCallback
|
|
||||||
|
|
||||||
|
|
||||||
async def async_setup_entry(
|
|
||||||
_hass: HomeAssistant,
|
|
||||||
entry: TibberPricesConfigEntry,
|
|
||||||
async_add_entities: AddEntitiesCallback,
|
|
||||||
) -> None:
|
|
||||||
"""Set up Tibber Prices number entities based on a config entry."""
|
|
||||||
coordinator = entry.runtime_data.coordinator
|
|
||||||
|
|
||||||
async_add_entities(
|
|
||||||
TibberPricesConfigNumber(
|
|
||||||
coordinator=coordinator,
|
|
||||||
entity_description=entity_description,
|
|
||||||
)
|
|
||||||
for entity_description in NUMBER_ENTITY_DESCRIPTIONS
|
|
||||||
)
|
|
||||||
|
|
@ -1,242 +0,0 @@
|
||||||
"""
|
|
||||||
Number entity implementation for Tibber Prices configuration overrides.
|
|
||||||
|
|
||||||
These entities allow runtime configuration of period calculation settings.
|
|
||||||
When a config entity is enabled, its value takes precedence over the
|
|
||||||
options flow setting for period calculations.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
import logging
|
|
||||||
from typing import TYPE_CHECKING, Any
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import (
|
|
||||||
DOMAIN,
|
|
||||||
get_home_type_translation,
|
|
||||||
get_translation,
|
|
||||||
)
|
|
||||||
from homeassistant.components.number import NumberEntity, RestoreNumber
|
|
||||||
from homeassistant.core import callback
|
|
||||||
from homeassistant.helpers.device_registry import DeviceEntryType, DeviceInfo
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
from custom_components.tibber_prices.coordinator import (
|
|
||||||
TibberPricesDataUpdateCoordinator,
|
|
||||||
)
|
|
||||||
|
|
||||||
from .definitions import TibberPricesNumberEntityDescription
|
|
||||||
|
|
||||||
_LOGGER = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class TibberPricesConfigNumber(RestoreNumber, NumberEntity):
|
|
||||||
"""
|
|
||||||
A number entity for configuring period calculation settings at runtime.
|
|
||||||
|
|
||||||
When this entity is enabled, its value overrides the corresponding
|
|
||||||
options flow setting. When disabled (default), the options flow
|
|
||||||
setting is used for period calculations.
|
|
||||||
|
|
||||||
The entity restores its value after Home Assistant restart.
|
|
||||||
"""
|
|
||||||
|
|
||||||
_attr_has_entity_name = True
|
|
||||||
entity_description: TibberPricesNumberEntityDescription
|
|
||||||
|
|
||||||
# Exclude all attributes from recorder history - config entities don't need history
|
|
||||||
_unrecorded_attributes = frozenset(
|
|
||||||
{
|
|
||||||
"description",
|
|
||||||
"long_description",
|
|
||||||
"usage_tips",
|
|
||||||
"friendly_name",
|
|
||||||
"icon",
|
|
||||||
"unit_of_measurement",
|
|
||||||
"mode",
|
|
||||||
"min",
|
|
||||||
"max",
|
|
||||||
"step",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
def __init__(
|
|
||||||
self,
|
|
||||||
coordinator: TibberPricesDataUpdateCoordinator,
|
|
||||||
entity_description: TibberPricesNumberEntityDescription,
|
|
||||||
) -> None:
|
|
||||||
"""Initialize the config number entity."""
|
|
||||||
self.coordinator = coordinator
|
|
||||||
self.entity_description = entity_description
|
|
||||||
|
|
||||||
# Set unique ID
|
|
||||||
self._attr_unique_id = (
|
|
||||||
f"{coordinator.config_entry.unique_id or coordinator.config_entry.entry_id}_{entity_description.key}"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Initialize with None - will be set in async_added_to_hass
|
|
||||||
self._attr_native_value: float | None = None
|
|
||||||
|
|
||||||
# Setup device info
|
|
||||||
self._setup_device_info()
|
|
||||||
|
|
||||||
def _setup_device_info(self) -> None:
|
|
||||||
"""Set up device information."""
|
|
||||||
home_name, home_id, home_type = self._get_device_info()
|
|
||||||
language = self.coordinator.hass.config.language or "en"
|
|
||||||
translated_model = get_home_type_translation(home_type, language) if home_type else "Unknown"
|
|
||||||
|
|
||||||
self._attr_device_info = DeviceInfo(
|
|
||||||
entry_type=DeviceEntryType.SERVICE,
|
|
||||||
identifiers={
|
|
||||||
(
|
|
||||||
DOMAIN,
|
|
||||||
self.coordinator.config_entry.unique_id or self.coordinator.config_entry.entry_id,
|
|
||||||
)
|
|
||||||
},
|
|
||||||
name=home_name,
|
|
||||||
manufacturer="Tibber",
|
|
||||||
model=translated_model,
|
|
||||||
serial_number=home_id if home_id else None,
|
|
||||||
configuration_url="https://developer.tibber.com/explorer",
|
|
||||||
)
|
|
||||||
|
|
||||||
def _get_device_info(self) -> tuple[str, str | None, str | None]:
|
|
||||||
"""Get device name, ID and type."""
|
|
||||||
user_profile = self.coordinator.get_user_profile()
|
|
||||||
is_subentry = bool(self.coordinator.config_entry.data.get("home_id"))
|
|
||||||
home_id = self.coordinator.config_entry.unique_id
|
|
||||||
home_type = None
|
|
||||||
|
|
||||||
if is_subentry:
|
|
||||||
home_data = self.coordinator.config_entry.data.get("home_data", {})
|
|
||||||
home_id = self.coordinator.config_entry.data.get("home_id")
|
|
||||||
address = home_data.get("address", {})
|
|
||||||
address1 = address.get("address1", "")
|
|
||||||
city = address.get("city", "")
|
|
||||||
app_nickname = home_data.get("appNickname", "")
|
|
||||||
home_type = home_data.get("type", "")
|
|
||||||
|
|
||||||
if app_nickname and app_nickname.strip():
|
|
||||||
home_name = app_nickname.strip()
|
|
||||||
elif address1:
|
|
||||||
home_name = address1
|
|
||||||
if city:
|
|
||||||
home_name = f"{home_name}, {city}"
|
|
||||||
else:
|
|
||||||
home_name = f"Tibber Home {home_id[:8]}" if home_id else "Tibber Home"
|
|
||||||
elif user_profile:
|
|
||||||
home_name = user_profile.get("name") or "Tibber Home"
|
|
||||||
else:
|
|
||||||
home_name = "Tibber Home"
|
|
||||||
|
|
||||||
return home_name, home_id, home_type
|
|
||||||
|
|
||||||
async def async_added_to_hass(self) -> None:
|
|
||||||
"""Handle entity which was added to Home Assistant."""
|
|
||||||
await super().async_added_to_hass()
|
|
||||||
|
|
||||||
# Try to restore previous state
|
|
||||||
last_number_data = await self.async_get_last_number_data()
|
|
||||||
if last_number_data is not None and last_number_data.native_value is not None:
|
|
||||||
self._attr_native_value = last_number_data.native_value
|
|
||||||
_LOGGER.debug(
|
|
||||||
"Restored %s value: %s",
|
|
||||||
self.entity_description.key,
|
|
||||||
self._attr_native_value,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
# Initialize with value from options flow (or default)
|
|
||||||
self._attr_native_value = self._get_value_from_options()
|
|
||||||
_LOGGER.debug(
|
|
||||||
"Initialized %s from options: %s",
|
|
||||||
self.entity_description.key,
|
|
||||||
self._attr_native_value,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Register override with coordinator if entity is enabled
|
|
||||||
# This happens during add, so check entity registry
|
|
||||||
await self._sync_override_state()
|
|
||||||
|
|
||||||
async def async_will_remove_from_hass(self) -> None:
|
|
||||||
"""Handle entity removal from Home Assistant."""
|
|
||||||
# Remove override when entity is removed
|
|
||||||
self.coordinator.remove_config_override(
|
|
||||||
self.entity_description.config_key,
|
|
||||||
self.entity_description.config_section,
|
|
||||||
)
|
|
||||||
await super().async_will_remove_from_hass()
|
|
||||||
|
|
||||||
def _get_value_from_options(self) -> float:
|
|
||||||
"""Get the current value from options flow or default."""
|
|
||||||
options = self.coordinator.config_entry.options
|
|
||||||
section = options.get(self.entity_description.config_section, {})
|
|
||||||
value = section.get(
|
|
||||||
self.entity_description.config_key,
|
|
||||||
self.entity_description.default_value,
|
|
||||||
)
|
|
||||||
return float(value)
|
|
||||||
|
|
||||||
async def _sync_override_state(self) -> None:
|
|
||||||
"""Sync the override state with the coordinator based on entity enabled state."""
|
|
||||||
# Check if entity is enabled in registry
|
|
||||||
if self.registry_entry is not None and not self.registry_entry.disabled:
|
|
||||||
# Entity is enabled - register the override
|
|
||||||
if self._attr_native_value is not None:
|
|
||||||
self.coordinator.set_config_override(
|
|
||||||
self.entity_description.config_key,
|
|
||||||
self.entity_description.config_section,
|
|
||||||
self._attr_native_value,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
# Entity is disabled - remove override
|
|
||||||
self.coordinator.remove_config_override(
|
|
||||||
self.entity_description.config_key,
|
|
||||||
self.entity_description.config_section,
|
|
||||||
)
|
|
||||||
|
|
||||||
async def async_set_native_value(self, value: float) -> None:
|
|
||||||
"""Update the current value and trigger recalculation."""
|
|
||||||
self._attr_native_value = value
|
|
||||||
|
|
||||||
# Update the coordinator's runtime override
|
|
||||||
self.coordinator.set_config_override(
|
|
||||||
self.entity_description.config_key,
|
|
||||||
self.entity_description.config_section,
|
|
||||||
value,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Trigger period recalculation (same path as options update)
|
|
||||||
await self.coordinator.async_handle_config_override_update()
|
|
||||||
|
|
||||||
_LOGGER.debug(
|
|
||||||
"Updated %s to %s, triggered period recalculation",
|
|
||||||
self.entity_description.key,
|
|
||||||
value,
|
|
||||||
)
|
|
||||||
|
|
||||||
@property
|
|
||||||
def extra_state_attributes(self) -> dict[str, Any] | None:
|
|
||||||
"""Return entity state attributes with description."""
|
|
||||||
language = self.coordinator.hass.config.language or "en"
|
|
||||||
|
|
||||||
# Try to get description from custom translations
|
|
||||||
# Custom translations use direct path: number.{key}.description
|
|
||||||
translation_path = [
|
|
||||||
"number",
|
|
||||||
self.entity_description.translation_key or self.entity_description.key,
|
|
||||||
"description",
|
|
||||||
]
|
|
||||||
description = get_translation(translation_path, language)
|
|
||||||
|
|
||||||
attrs: dict[str, Any] = {}
|
|
||||||
if description:
|
|
||||||
attrs["description"] = description
|
|
||||||
|
|
||||||
return attrs if attrs else None
|
|
||||||
|
|
||||||
@callback
|
|
||||||
def async_registry_entry_updated(self) -> None:
|
|
||||||
"""Handle entity registry update (enabled/disabled state change)."""
|
|
||||||
# This is called when the entity is enabled/disabled in the UI
|
|
||||||
self.hass.async_create_task(self._sync_override_state())
|
|
||||||
|
|
@ -1,250 +0,0 @@
|
||||||
"""
|
|
||||||
Number entity definitions for Tibber Prices configuration overrides.
|
|
||||||
|
|
||||||
These number entities allow runtime configuration of Best Price and Peak Price
|
|
||||||
period calculation settings. They are disabled by default - users can enable
|
|
||||||
individual entities to override specific settings at runtime.
|
|
||||||
|
|
||||||
When enabled, the entity value takes precedence over the options flow setting.
|
|
||||||
When disabled (default), the options flow setting is used.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
from dataclasses import dataclass
|
|
||||||
|
|
||||||
from homeassistant.components.number import (
|
|
||||||
NumberEntityDescription,
|
|
||||||
NumberMode,
|
|
||||||
)
|
|
||||||
from homeassistant.const import PERCENTAGE, EntityCategory
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass(frozen=True, kw_only=True)
|
|
||||||
class TibberPricesNumberEntityDescription(NumberEntityDescription):
|
|
||||||
"""Describes a Tibber Prices number entity for config overrides."""
|
|
||||||
|
|
||||||
# The config key this entity overrides (matches CONF_* constants)
|
|
||||||
config_key: str
|
|
||||||
# The section in options where this setting is stored (e.g., "flexibility_settings")
|
|
||||||
config_section: str
|
|
||||||
# Whether this is for best_price (False) or peak_price (True)
|
|
||||||
is_peak_price: bool = False
|
|
||||||
# Default value from const.py
|
|
||||||
default_value: float | int = 0
|
|
||||||
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# BEST PRICE PERIOD CONFIGURATION OVERRIDES
|
|
||||||
# ============================================================================
|
|
||||||
|
|
||||||
BEST_PRICE_NUMBER_ENTITIES = (
|
|
||||||
TibberPricesNumberEntityDescription(
|
|
||||||
key="best_price_flex_override",
|
|
||||||
translation_key="best_price_flex_override",
|
|
||||||
name="Best Price: Flexibility",
|
|
||||||
icon="mdi:arrow-down-bold-circle",
|
|
||||||
entity_category=EntityCategory.CONFIG,
|
|
||||||
entity_registry_enabled_default=False,
|
|
||||||
native_min_value=0,
|
|
||||||
native_max_value=50,
|
|
||||||
native_step=1,
|
|
||||||
native_unit_of_measurement=PERCENTAGE,
|
|
||||||
mode=NumberMode.SLIDER,
|
|
||||||
config_key="best_price_flex",
|
|
||||||
config_section="flexibility_settings",
|
|
||||||
is_peak_price=False,
|
|
||||||
default_value=15, # DEFAULT_BEST_PRICE_FLEX
|
|
||||||
),
|
|
||||||
TibberPricesNumberEntityDescription(
|
|
||||||
key="best_price_min_distance_override",
|
|
||||||
translation_key="best_price_min_distance_override",
|
|
||||||
name="Best Price: Minimum Distance",
|
|
||||||
icon="mdi:arrow-down-bold-circle",
|
|
||||||
entity_category=EntityCategory.CONFIG,
|
|
||||||
entity_registry_enabled_default=False,
|
|
||||||
native_min_value=-50,
|
|
||||||
native_max_value=0,
|
|
||||||
native_step=1,
|
|
||||||
native_unit_of_measurement=PERCENTAGE,
|
|
||||||
mode=NumberMode.SLIDER,
|
|
||||||
config_key="best_price_min_distance_from_avg",
|
|
||||||
config_section="flexibility_settings",
|
|
||||||
is_peak_price=False,
|
|
||||||
default_value=-5, # DEFAULT_BEST_PRICE_MIN_DISTANCE_FROM_AVG
|
|
||||||
),
|
|
||||||
TibberPricesNumberEntityDescription(
|
|
||||||
key="best_price_min_period_length_override",
|
|
||||||
translation_key="best_price_min_period_length_override",
|
|
||||||
name="Best Price: Minimum Period Length",
|
|
||||||
icon="mdi:arrow-down-bold-circle",
|
|
||||||
entity_category=EntityCategory.CONFIG,
|
|
||||||
entity_registry_enabled_default=False,
|
|
||||||
native_min_value=15,
|
|
||||||
native_max_value=180,
|
|
||||||
native_step=15,
|
|
||||||
native_unit_of_measurement="min",
|
|
||||||
mode=NumberMode.SLIDER,
|
|
||||||
config_key="best_price_min_period_length",
|
|
||||||
config_section="period_settings",
|
|
||||||
is_peak_price=False,
|
|
||||||
default_value=60, # DEFAULT_BEST_PRICE_MIN_PERIOD_LENGTH
|
|
||||||
),
|
|
||||||
TibberPricesNumberEntityDescription(
|
|
||||||
key="best_price_min_periods_override",
|
|
||||||
translation_key="best_price_min_periods_override",
|
|
||||||
name="Best Price: Minimum Periods",
|
|
||||||
icon="mdi:arrow-down-bold-circle",
|
|
||||||
entity_category=EntityCategory.CONFIG,
|
|
||||||
entity_registry_enabled_default=False,
|
|
||||||
native_min_value=1,
|
|
||||||
native_max_value=10,
|
|
||||||
native_step=1,
|
|
||||||
mode=NumberMode.SLIDER,
|
|
||||||
config_key="min_periods_best",
|
|
||||||
config_section="relaxation_and_target_periods",
|
|
||||||
is_peak_price=False,
|
|
||||||
default_value=2, # DEFAULT_MIN_PERIODS_BEST
|
|
||||||
),
|
|
||||||
TibberPricesNumberEntityDescription(
|
|
||||||
key="best_price_relaxation_attempts_override",
|
|
||||||
translation_key="best_price_relaxation_attempts_override",
|
|
||||||
name="Best Price: Relaxation Attempts",
|
|
||||||
icon="mdi:arrow-down-bold-circle",
|
|
||||||
entity_category=EntityCategory.CONFIG,
|
|
||||||
entity_registry_enabled_default=False,
|
|
||||||
native_min_value=1,
|
|
||||||
native_max_value=12,
|
|
||||||
native_step=1,
|
|
||||||
mode=NumberMode.SLIDER,
|
|
||||||
config_key="relaxation_attempts_best",
|
|
||||||
config_section="relaxation_and_target_periods",
|
|
||||||
is_peak_price=False,
|
|
||||||
default_value=11, # DEFAULT_RELAXATION_ATTEMPTS_BEST
|
|
||||||
),
|
|
||||||
TibberPricesNumberEntityDescription(
|
|
||||||
key="best_price_gap_count_override",
|
|
||||||
translation_key="best_price_gap_count_override",
|
|
||||||
name="Best Price: Gap Tolerance",
|
|
||||||
icon="mdi:arrow-down-bold-circle",
|
|
||||||
entity_category=EntityCategory.CONFIG,
|
|
||||||
entity_registry_enabled_default=False,
|
|
||||||
native_min_value=0,
|
|
||||||
native_max_value=8,
|
|
||||||
native_step=1,
|
|
||||||
mode=NumberMode.SLIDER,
|
|
||||||
config_key="best_price_max_level_gap_count",
|
|
||||||
config_section="period_settings",
|
|
||||||
is_peak_price=False,
|
|
||||||
default_value=1, # DEFAULT_BEST_PRICE_MAX_LEVEL_GAP_COUNT
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# PEAK PRICE PERIOD CONFIGURATION OVERRIDES
|
|
||||||
# ============================================================================
|
|
||||||
|
|
||||||
PEAK_PRICE_NUMBER_ENTITIES = (
|
|
||||||
TibberPricesNumberEntityDescription(
|
|
||||||
key="peak_price_flex_override",
|
|
||||||
translation_key="peak_price_flex_override",
|
|
||||||
name="Peak Price: Flexibility",
|
|
||||||
icon="mdi:arrow-up-bold-circle",
|
|
||||||
entity_category=EntityCategory.CONFIG,
|
|
||||||
entity_registry_enabled_default=False,
|
|
||||||
native_min_value=-50,
|
|
||||||
native_max_value=0,
|
|
||||||
native_step=1,
|
|
||||||
native_unit_of_measurement=PERCENTAGE,
|
|
||||||
mode=NumberMode.SLIDER,
|
|
||||||
config_key="peak_price_flex",
|
|
||||||
config_section="flexibility_settings",
|
|
||||||
is_peak_price=True,
|
|
||||||
default_value=-20, # DEFAULT_PEAK_PRICE_FLEX
|
|
||||||
),
|
|
||||||
TibberPricesNumberEntityDescription(
|
|
||||||
key="peak_price_min_distance_override",
|
|
||||||
translation_key="peak_price_min_distance_override",
|
|
||||||
name="Peak Price: Minimum Distance",
|
|
||||||
icon="mdi:arrow-up-bold-circle",
|
|
||||||
entity_category=EntityCategory.CONFIG,
|
|
||||||
entity_registry_enabled_default=False,
|
|
||||||
native_min_value=0,
|
|
||||||
native_max_value=50,
|
|
||||||
native_step=1,
|
|
||||||
native_unit_of_measurement=PERCENTAGE,
|
|
||||||
mode=NumberMode.SLIDER,
|
|
||||||
config_key="peak_price_min_distance_from_avg",
|
|
||||||
config_section="flexibility_settings",
|
|
||||||
is_peak_price=True,
|
|
||||||
default_value=5, # DEFAULT_PEAK_PRICE_MIN_DISTANCE_FROM_AVG
|
|
||||||
),
|
|
||||||
TibberPricesNumberEntityDescription(
|
|
||||||
key="peak_price_min_period_length_override",
|
|
||||||
translation_key="peak_price_min_period_length_override",
|
|
||||||
name="Peak Price: Minimum Period Length",
|
|
||||||
icon="mdi:arrow-up-bold-circle",
|
|
||||||
entity_category=EntityCategory.CONFIG,
|
|
||||||
entity_registry_enabled_default=False,
|
|
||||||
native_min_value=15,
|
|
||||||
native_max_value=180,
|
|
||||||
native_step=15,
|
|
||||||
native_unit_of_measurement="min",
|
|
||||||
mode=NumberMode.SLIDER,
|
|
||||||
config_key="peak_price_min_period_length",
|
|
||||||
config_section="period_settings",
|
|
||||||
is_peak_price=True,
|
|
||||||
default_value=30, # DEFAULT_PEAK_PRICE_MIN_PERIOD_LENGTH
|
|
||||||
),
|
|
||||||
TibberPricesNumberEntityDescription(
|
|
||||||
key="peak_price_min_periods_override",
|
|
||||||
translation_key="peak_price_min_periods_override",
|
|
||||||
name="Peak Price: Minimum Periods",
|
|
||||||
icon="mdi:arrow-up-bold-circle",
|
|
||||||
entity_category=EntityCategory.CONFIG,
|
|
||||||
entity_registry_enabled_default=False,
|
|
||||||
native_min_value=1,
|
|
||||||
native_max_value=10,
|
|
||||||
native_step=1,
|
|
||||||
mode=NumberMode.SLIDER,
|
|
||||||
config_key="min_periods_peak",
|
|
||||||
config_section="relaxation_and_target_periods",
|
|
||||||
is_peak_price=True,
|
|
||||||
default_value=2, # DEFAULT_MIN_PERIODS_PEAK
|
|
||||||
),
|
|
||||||
TibberPricesNumberEntityDescription(
|
|
||||||
key="peak_price_relaxation_attempts_override",
|
|
||||||
translation_key="peak_price_relaxation_attempts_override",
|
|
||||||
name="Peak Price: Relaxation Attempts",
|
|
||||||
icon="mdi:arrow-up-bold-circle",
|
|
||||||
entity_category=EntityCategory.CONFIG,
|
|
||||||
entity_registry_enabled_default=False,
|
|
||||||
native_min_value=1,
|
|
||||||
native_max_value=12,
|
|
||||||
native_step=1,
|
|
||||||
mode=NumberMode.SLIDER,
|
|
||||||
config_key="relaxation_attempts_peak",
|
|
||||||
config_section="relaxation_and_target_periods",
|
|
||||||
is_peak_price=True,
|
|
||||||
default_value=11, # DEFAULT_RELAXATION_ATTEMPTS_PEAK
|
|
||||||
),
|
|
||||||
TibberPricesNumberEntityDescription(
|
|
||||||
key="peak_price_gap_count_override",
|
|
||||||
translation_key="peak_price_gap_count_override",
|
|
||||||
name="Peak Price: Gap Tolerance",
|
|
||||||
icon="mdi:arrow-up-bold-circle",
|
|
||||||
entity_category=EntityCategory.CONFIG,
|
|
||||||
entity_registry_enabled_default=False,
|
|
||||||
native_min_value=0,
|
|
||||||
native_max_value=8,
|
|
||||||
native_step=1,
|
|
||||||
mode=NumberMode.SLIDER,
|
|
||||||
config_key="peak_price_max_level_gap_count",
|
|
||||||
config_section="period_settings",
|
|
||||||
is_peak_price=True,
|
|
||||||
default_value=1, # DEFAULT_PEAK_PRICE_MAX_LEVEL_GAP_COUNT
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
# All number entity descriptions combined
|
|
||||||
NUMBER_ENTITY_DESCRIPTIONS = BEST_PRICE_NUMBER_ENTITIES + PEAK_PRICE_NUMBER_ENTITIES
|
|
||||||
|
|
@ -17,11 +17,6 @@ from __future__ import annotations
|
||||||
|
|
||||||
from typing import TYPE_CHECKING
|
from typing import TYPE_CHECKING
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import (
|
|
||||||
CONF_CURRENCY_DISPLAY_MODE,
|
|
||||||
DISPLAY_MODE_BASE,
|
|
||||||
)
|
|
||||||
|
|
||||||
from .core import TibberPricesSensor
|
from .core import TibberPricesSensor
|
||||||
from .definitions import ENTITY_DESCRIPTIONS
|
from .definitions import ENTITY_DESCRIPTIONS
|
||||||
|
|
||||||
|
|
@ -39,22 +34,10 @@ async def async_setup_entry(
|
||||||
"""Set up Tibber Prices sensor based on a config entry."""
|
"""Set up Tibber Prices sensor based on a config entry."""
|
||||||
coordinator = entry.runtime_data.coordinator
|
coordinator = entry.runtime_data.coordinator
|
||||||
|
|
||||||
# Get display mode from config
|
|
||||||
display_mode = entry.options.get(CONF_CURRENCY_DISPLAY_MODE, DISPLAY_MODE_BASE)
|
|
||||||
|
|
||||||
# Filter entity descriptions based on display mode
|
|
||||||
# Skip current_interval_price_base if user configured major display
|
|
||||||
# (regular current_interval_price already shows major units)
|
|
||||||
entities_to_create = [
|
|
||||||
entity_description
|
|
||||||
for entity_description in ENTITY_DESCRIPTIONS
|
|
||||||
if not (entity_description.key == "current_interval_price_base" and display_mode == DISPLAY_MODE_BASE)
|
|
||||||
]
|
|
||||||
|
|
||||||
async_add_entities(
|
async_add_entities(
|
||||||
TibberPricesSensor(
|
TibberPricesSensor(
|
||||||
coordinator=coordinator,
|
coordinator=coordinator,
|
||||||
entity_description=entity_description,
|
entity_description=entity_description,
|
||||||
)
|
)
|
||||||
for entity_description in entities_to_create
|
for entity_description in ENTITY_DESCRIPTIONS
|
||||||
)
|
)
|
||||||
|
|
|
||||||
|
|
@ -77,8 +77,6 @@ def build_sensor_attributes(
|
||||||
coordinator: TibberPricesDataUpdateCoordinator,
|
coordinator: TibberPricesDataUpdateCoordinator,
|
||||||
native_value: Any,
|
native_value: Any,
|
||||||
cached_data: dict,
|
cached_data: dict,
|
||||||
*,
|
|
||||||
config_entry: TibberPricesConfigEntry,
|
|
||||||
) -> dict[str, Any] | None:
|
) -> dict[str, Any] | None:
|
||||||
"""
|
"""
|
||||||
Build attributes for a sensor based on its key.
|
Build attributes for a sensor based on its key.
|
||||||
|
|
@ -90,7 +88,6 @@ def build_sensor_attributes(
|
||||||
coordinator: The data update coordinator
|
coordinator: The data update coordinator
|
||||||
native_value: The current native value of the sensor
|
native_value: The current native value of the sensor
|
||||||
cached_data: Dictionary containing cached sensor data
|
cached_data: Dictionary containing cached sensor data
|
||||||
config_entry: Config entry for user preferences
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Dictionary of attributes or None if no attributes should be added
|
Dictionary of attributes or None if no attributes should be added
|
||||||
|
|
@ -130,7 +127,6 @@ def build_sensor_attributes(
|
||||||
native_value=native_value,
|
native_value=native_value,
|
||||||
cached_data=cached_data,
|
cached_data=cached_data,
|
||||||
time=time,
|
time=time,
|
||||||
config_entry=config_entry,
|
|
||||||
)
|
)
|
||||||
elif key in [
|
elif key in [
|
||||||
"trailing_price_average",
|
"trailing_price_average",
|
||||||
|
|
@ -140,23 +136,9 @@ def build_sensor_attributes(
|
||||||
"leading_price_min",
|
"leading_price_min",
|
||||||
"leading_price_max",
|
"leading_price_max",
|
||||||
]:
|
]:
|
||||||
add_average_price_attributes(
|
add_average_price_attributes(attributes=attributes, key=key, coordinator=coordinator, time=time)
|
||||||
attributes=attributes,
|
|
||||||
key=key,
|
|
||||||
coordinator=coordinator,
|
|
||||||
time=time,
|
|
||||||
cached_data=cached_data,
|
|
||||||
config_entry=config_entry,
|
|
||||||
)
|
|
||||||
elif key.startswith("next_avg_"):
|
elif key.startswith("next_avg_"):
|
||||||
add_next_avg_attributes(
|
add_next_avg_attributes(attributes=attributes, key=key, coordinator=coordinator, time=time)
|
||||||
attributes=attributes,
|
|
||||||
key=key,
|
|
||||||
coordinator=coordinator,
|
|
||||||
time=time,
|
|
||||||
cached_data=cached_data,
|
|
||||||
config_entry=config_entry,
|
|
||||||
)
|
|
||||||
elif any(
|
elif any(
|
||||||
pattern in key
|
pattern in key
|
||||||
for pattern in [
|
for pattern in [
|
||||||
|
|
@ -178,7 +160,6 @@ def build_sensor_attributes(
|
||||||
key=key,
|
key=key,
|
||||||
cached_data=cached_data,
|
cached_data=cached_data,
|
||||||
time=time,
|
time=time,
|
||||||
config_entry=config_entry,
|
|
||||||
)
|
)
|
||||||
elif key == "data_lifecycle_status":
|
elif key == "data_lifecycle_status":
|
||||||
# Lifecycle sensor uses dedicated builder with calculator
|
# Lifecycle sensor uses dedicated builder with calculator
|
||||||
|
|
|
||||||
|
|
@ -14,9 +14,6 @@ if TYPE_CHECKING:
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
|
|
||||||
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
||||||
from custom_components.tibber_prices.data import TibberPricesConfigEntry
|
|
||||||
|
|
||||||
from .helpers import add_alternate_average_attribute
|
|
||||||
|
|
||||||
|
|
||||||
def _get_day_midnight_timestamp(key: str, *, time: TibberPricesTimeService) -> datetime:
|
def _get_day_midnight_timestamp(key: str, *, time: TibberPricesTimeService) -> datetime:
|
||||||
|
|
@ -86,7 +83,6 @@ def add_statistics_attributes(
|
||||||
cached_data: dict,
|
cached_data: dict,
|
||||||
*,
|
*,
|
||||||
time: TibberPricesTimeService,
|
time: TibberPricesTimeService,
|
||||||
config_entry: TibberPricesConfigEntry,
|
|
||||||
) -> None:
|
) -> None:
|
||||||
"""
|
"""
|
||||||
Add attributes for statistics and rating sensors.
|
Add attributes for statistics and rating sensors.
|
||||||
|
|
@ -96,7 +92,6 @@ def add_statistics_attributes(
|
||||||
key: The sensor entity key
|
key: The sensor entity key
|
||||||
cached_data: Dictionary containing cached sensor data
|
cached_data: Dictionary containing cached sensor data
|
||||||
time: TibberPricesTimeService instance (required)
|
time: TibberPricesTimeService instance (required)
|
||||||
config_entry: Config entry for user preferences
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
# Data timestamp sensor - shows API fetch time
|
# Data timestamp sensor - shows API fetch time
|
||||||
|
|
@ -131,17 +126,10 @@ def add_statistics_attributes(
|
||||||
attributes["timestamp"] = extreme_starts_at
|
attributes["timestamp"] = extreme_starts_at
|
||||||
return
|
return
|
||||||
|
|
||||||
# Daily average sensors - show midnight to indicate whole day + add alternate value
|
# Daily average sensors - show midnight to indicate whole day
|
||||||
daily_avg_sensors = {"average_price_today", "average_price_tomorrow"}
|
daily_avg_sensors = {"average_price_today", "average_price_tomorrow"}
|
||||||
if key in daily_avg_sensors:
|
if key in daily_avg_sensors:
|
||||||
attributes["timestamp"] = _get_day_midnight_timestamp(key, time=time)
|
attributes["timestamp"] = _get_day_midnight_timestamp(key, time=time)
|
||||||
# Add alternate average attribute
|
|
||||||
add_alternate_average_attribute(
|
|
||||||
attributes,
|
|
||||||
cached_data,
|
|
||||||
key, # base_key = key itself ("average_price_today" or "average_price_tomorrow")
|
|
||||||
config_entry=config_entry,
|
|
||||||
)
|
|
||||||
return
|
return
|
||||||
|
|
||||||
# Daily aggregated level/rating sensors - show midnight to indicate whole day
|
# Daily aggregated level/rating sensors - show midnight to indicate whole day
|
||||||
|
|
|
||||||
|
|
@ -4,7 +4,6 @@ from __future__ import annotations
|
||||||
|
|
||||||
from typing import TYPE_CHECKING
|
from typing import TYPE_CHECKING
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import get_display_unit_factor
|
|
||||||
from custom_components.tibber_prices.coordinator.helpers import get_intervals_for_day_offsets
|
from custom_components.tibber_prices.coordinator.helpers import get_intervals_for_day_offsets
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
|
|
@ -12,22 +11,17 @@ if TYPE_CHECKING:
|
||||||
TibberPricesDataUpdateCoordinator,
|
TibberPricesDataUpdateCoordinator,
|
||||||
)
|
)
|
||||||
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
||||||
from custom_components.tibber_prices.data import TibberPricesConfigEntry
|
|
||||||
|
|
||||||
from .helpers import add_alternate_average_attribute
|
|
||||||
|
|
||||||
# Constants
|
# Constants
|
||||||
MAX_FORECAST_INTERVALS = 8 # Show up to 8 future intervals (2 hours with 15-min intervals)
|
MAX_FORECAST_INTERVALS = 8 # Show up to 8 future intervals (2 hours with 15-min intervals)
|
||||||
|
|
||||||
|
|
||||||
def add_next_avg_attributes( # noqa: PLR0913
|
def add_next_avg_attributes(
|
||||||
attributes: dict,
|
attributes: dict,
|
||||||
key: str,
|
key: str,
|
||||||
coordinator: TibberPricesDataUpdateCoordinator,
|
coordinator: TibberPricesDataUpdateCoordinator,
|
||||||
*,
|
*,
|
||||||
time: TibberPricesTimeService,
|
time: TibberPricesTimeService,
|
||||||
cached_data: dict | None = None,
|
|
||||||
config_entry: TibberPricesConfigEntry | None = None,
|
|
||||||
) -> None:
|
) -> None:
|
||||||
"""
|
"""
|
||||||
Add attributes for next N hours average price sensors.
|
Add attributes for next N hours average price sensors.
|
||||||
|
|
@ -37,8 +31,6 @@ def add_next_avg_attributes( # noqa: PLR0913
|
||||||
key: The sensor entity key
|
key: The sensor entity key
|
||||||
coordinator: The data update coordinator
|
coordinator: The data update coordinator
|
||||||
time: TibberPricesTimeService instance (required)
|
time: TibberPricesTimeService instance (required)
|
||||||
cached_data: Optional cached data dictionary for median values
|
|
||||||
config_entry: Optional config entry for user preferences
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
# Extract hours from sensor key (e.g., "next_avg_3h" -> 3)
|
# Extract hours from sensor key (e.g., "next_avg_3h" -> 3)
|
||||||
|
|
@ -70,35 +62,23 @@ def add_next_avg_attributes( # noqa: PLR0913
|
||||||
attributes["interval_count"] = len(intervals_in_window)
|
attributes["interval_count"] = len(intervals_in_window)
|
||||||
attributes["hours"] = hours
|
attributes["hours"] = hours
|
||||||
|
|
||||||
# Add alternate average attribute if available in cached_data
|
|
||||||
if cached_data and config_entry:
|
|
||||||
base_key = f"next_avg_{hours}h"
|
|
||||||
add_alternate_average_attribute(
|
|
||||||
attributes,
|
|
||||||
cached_data,
|
|
||||||
base_key,
|
|
||||||
config_entry=config_entry,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def get_future_prices(
|
def get_future_prices(
|
||||||
coordinator: TibberPricesDataUpdateCoordinator,
|
coordinator: TibberPricesDataUpdateCoordinator,
|
||||||
max_intervals: int | None = None,
|
max_intervals: int | None = None,
|
||||||
*,
|
*,
|
||||||
time: TibberPricesTimeService,
|
time: TibberPricesTimeService,
|
||||||
config_entry: TibberPricesConfigEntry,
|
|
||||||
) -> list[dict] | None:
|
) -> list[dict] | None:
|
||||||
"""
|
"""
|
||||||
Get future price data for multiple upcoming intervals.
|
Get future price data for multiple upcoming intervals.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
coordinator: The data update coordinator.
|
coordinator: The data update coordinator
|
||||||
max_intervals: Maximum number of future intervals to return.
|
max_intervals: Maximum number of future intervals to return
|
||||||
time: TibberPricesTimeService instance (required).
|
time: TibberPricesTimeService instance (required)
|
||||||
config_entry: Config entry to get display unit configuration.
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
List of upcoming price intervals with timestamps and prices.
|
List of upcoming price intervals with timestamps and prices
|
||||||
|
|
||||||
"""
|
"""
|
||||||
if not coordinator.data:
|
if not coordinator.data:
|
||||||
|
|
@ -139,17 +119,12 @@ def get_future_prices(
|
||||||
else:
|
else:
|
||||||
day_key = "unknown"
|
day_key = "unknown"
|
||||||
|
|
||||||
# Convert to display currency unit based on configuration
|
|
||||||
price_major = float(price_data["total"])
|
|
||||||
factor = get_display_unit_factor(config_entry)
|
|
||||||
price_display = round(price_major * factor, 2)
|
|
||||||
|
|
||||||
future_prices.append(
|
future_prices.append(
|
||||||
{
|
{
|
||||||
"interval_start": starts_at,
|
"interval_start": starts_at,
|
||||||
"interval_end": interval_end,
|
"interval_end": interval_end,
|
||||||
"price": price_major,
|
"price": float(price_data["total"]),
|
||||||
"price_minor": price_display,
|
"price_minor": round(float(price_data["total"]) * 100, 2),
|
||||||
"level": price_data.get("level", "NORMAL"),
|
"level": price_data.get("level", "NORMAL"),
|
||||||
"rating": price_data.get("difference", None),
|
"rating": price_data.get("difference", None),
|
||||||
"rating_level": price_data.get("rating_level"),
|
"rating_level": price_data.get("rating_level"),
|
||||||
|
|
|
||||||
|
|
@ -1,41 +0,0 @@
|
||||||
"""Helper functions for sensor attributes."""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
from typing import TYPE_CHECKING
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
from custom_components.tibber_prices.data import TibberPricesConfigEntry
|
|
||||||
|
|
||||||
|
|
||||||
def add_alternate_average_attribute(
|
|
||||||
attributes: dict,
|
|
||||||
cached_data: dict,
|
|
||||||
base_key: str,
|
|
||||||
*,
|
|
||||||
config_entry: TibberPricesConfigEntry, # noqa: ARG001
|
|
||||||
) -> None:
|
|
||||||
"""
|
|
||||||
Add both average values (mean and median) as attributes.
|
|
||||||
|
|
||||||
This ensures automations work consistently regardless of which value
|
|
||||||
is displayed in the state. Both values are always available as attributes.
|
|
||||||
|
|
||||||
Note: To avoid duplicate recording, the value used as state should be
|
|
||||||
excluded from recorder via dynamic _unrecorded_attributes in sensor core.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
attributes: Dictionary to add attribute to
|
|
||||||
cached_data: Cached calculation data containing mean/median values
|
|
||||||
base_key: Base key for cached values (e.g., "average_price_today", "rolling_hour_0")
|
|
||||||
config_entry: Config entry for user preferences (used to determine which value is in state)
|
|
||||||
|
|
||||||
"""
|
|
||||||
# Always add both mean and median values as attributes
|
|
||||||
mean_value = cached_data.get(f"{base_key}_mean")
|
|
||||||
if mean_value is not None:
|
|
||||||
attributes["price_mean"] = mean_value
|
|
||||||
|
|
||||||
median_value = cached_data.get(f"{base_key}_median")
|
|
||||||
if median_value is not None:
|
|
||||||
attributes["price_median"] = median_value
|
|
||||||
|
|
@ -17,78 +17,10 @@ if TYPE_CHECKING:
|
||||||
TibberPricesDataUpdateCoordinator,
|
TibberPricesDataUpdateCoordinator,
|
||||||
)
|
)
|
||||||
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
||||||
from custom_components.tibber_prices.data import TibberPricesConfigEntry
|
|
||||||
|
|
||||||
from .helpers import add_alternate_average_attribute
|
|
||||||
from .metadata import get_current_interval_data
|
from .metadata import get_current_interval_data
|
||||||
|
|
||||||
|
|
||||||
def _get_interval_data_for_attributes(
|
|
||||||
key: str,
|
|
||||||
coordinator: TibberPricesDataUpdateCoordinator,
|
|
||||||
attributes: dict,
|
|
||||||
*,
|
|
||||||
time: TibberPricesTimeService,
|
|
||||||
) -> dict | None:
|
|
||||||
"""
|
|
||||||
Get interval data and set timestamp based on sensor type.
|
|
||||||
|
|
||||||
Refactored to reduce branch complexity in main function.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
key: The sensor entity key
|
|
||||||
coordinator: The data update coordinator
|
|
||||||
attributes: Attributes dict to update with timestamp if needed
|
|
||||||
time: TibberPricesTimeService instance
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Interval data if found, None otherwise
|
|
||||||
|
|
||||||
"""
|
|
||||||
now = time.now()
|
|
||||||
|
|
||||||
# Current/next price sensors - override timestamp with interval's startsAt
|
|
||||||
next_sensors = ["next_interval_price", "next_interval_price_level", "next_interval_price_rating"]
|
|
||||||
prev_sensors = ["previous_interval_price", "previous_interval_price_level", "previous_interval_price_rating"]
|
|
||||||
next_hour = ["next_hour_average_price", "next_hour_price_level", "next_hour_price_rating"]
|
|
||||||
curr_interval = ["current_interval_price", "current_interval_price_base"]
|
|
||||||
curr_hour = ["current_hour_average_price", "current_hour_price_level", "current_hour_price_rating"]
|
|
||||||
|
|
||||||
if key in next_sensors:
|
|
||||||
target_time = time.get_next_interval_start()
|
|
||||||
interval_data = find_price_data_for_interval(coordinator.data, target_time, time=time)
|
|
||||||
if interval_data:
|
|
||||||
attributes["timestamp"] = interval_data["startsAt"]
|
|
||||||
return interval_data
|
|
||||||
|
|
||||||
if key in prev_sensors:
|
|
||||||
target_time = time.get_interval_offset_time(-1)
|
|
||||||
interval_data = find_price_data_for_interval(coordinator.data, target_time, time=time)
|
|
||||||
if interval_data:
|
|
||||||
attributes["timestamp"] = interval_data["startsAt"]
|
|
||||||
return interval_data
|
|
||||||
|
|
||||||
if key in next_hour:
|
|
||||||
target_time = now + timedelta(hours=1)
|
|
||||||
interval_data = find_price_data_for_interval(coordinator.data, target_time, time=time)
|
|
||||||
if interval_data:
|
|
||||||
attributes["timestamp"] = interval_data["startsAt"]
|
|
||||||
return interval_data
|
|
||||||
|
|
||||||
# Current interval sensors (both variants)
|
|
||||||
if key in curr_interval:
|
|
||||||
interval_data = get_current_interval_data(coordinator, time=time)
|
|
||||||
if interval_data and "startsAt" in interval_data:
|
|
||||||
attributes["timestamp"] = interval_data["startsAt"]
|
|
||||||
return interval_data
|
|
||||||
|
|
||||||
# Current hour sensors - keep default timestamp
|
|
||||||
if key in curr_hour:
|
|
||||||
return get_current_interval_data(coordinator, time=time)
|
|
||||||
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def add_current_interval_price_attributes( # noqa: PLR0913
|
def add_current_interval_price_attributes( # noqa: PLR0913
|
||||||
attributes: dict,
|
attributes: dict,
|
||||||
key: str,
|
key: str,
|
||||||
|
|
@ -97,7 +29,6 @@ def add_current_interval_price_attributes( # noqa: PLR0913
|
||||||
cached_data: dict,
|
cached_data: dict,
|
||||||
*,
|
*,
|
||||||
time: TibberPricesTimeService,
|
time: TibberPricesTimeService,
|
||||||
config_entry: TibberPricesConfigEntry,
|
|
||||||
) -> None:
|
) -> None:
|
||||||
"""
|
"""
|
||||||
Add attributes for current interval price sensors.
|
Add attributes for current interval price sensors.
|
||||||
|
|
@ -109,19 +40,64 @@ def add_current_interval_price_attributes( # noqa: PLR0913
|
||||||
native_value: The current native value of the sensor
|
native_value: The current native value of the sensor
|
||||||
cached_data: Dictionary containing cached sensor data
|
cached_data: Dictionary containing cached sensor data
|
||||||
time: TibberPricesTimeService instance (required)
|
time: TibberPricesTimeService instance (required)
|
||||||
config_entry: Config entry for user preferences
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
# Get interval data and handle timestamp overrides
|
now = time.now()
|
||||||
interval_data = _get_interval_data_for_attributes(key, coordinator, attributes, time=time)
|
|
||||||
|
# Determine which interval to use based on sensor type
|
||||||
|
next_interval_sensors = [
|
||||||
|
"next_interval_price",
|
||||||
|
"next_interval_price_level",
|
||||||
|
"next_interval_price_rating",
|
||||||
|
]
|
||||||
|
previous_interval_sensors = [
|
||||||
|
"previous_interval_price",
|
||||||
|
"previous_interval_price_level",
|
||||||
|
"previous_interval_price_rating",
|
||||||
|
]
|
||||||
|
next_hour_sensors = [
|
||||||
|
"next_hour_average_price",
|
||||||
|
"next_hour_price_level",
|
||||||
|
"next_hour_price_rating",
|
||||||
|
]
|
||||||
|
current_hour_sensors = [
|
||||||
|
"current_hour_average_price",
|
||||||
|
"current_hour_price_level",
|
||||||
|
"current_hour_price_rating",
|
||||||
|
]
|
||||||
|
|
||||||
|
# Set interval data based on sensor type
|
||||||
|
# For sensors showing data from OTHER intervals (next/previous), override timestamp with that interval's startsAt
|
||||||
|
# For current interval sensors, keep the default platform timestamp (calculation time)
|
||||||
|
interval_data = None
|
||||||
|
if key in next_interval_sensors:
|
||||||
|
target_time = time.get_next_interval_start()
|
||||||
|
interval_data = find_price_data_for_interval(coordinator.data, target_time, time=time)
|
||||||
|
# Override timestamp with the NEXT interval's startsAt (when that interval starts)
|
||||||
|
if interval_data:
|
||||||
|
attributes["timestamp"] = interval_data["startsAt"]
|
||||||
|
elif key in previous_interval_sensors:
|
||||||
|
target_time = time.get_interval_offset_time(-1)
|
||||||
|
interval_data = find_price_data_for_interval(coordinator.data, target_time, time=time)
|
||||||
|
# Override timestamp with the PREVIOUS interval's startsAt
|
||||||
|
if interval_data:
|
||||||
|
attributes["timestamp"] = interval_data["startsAt"]
|
||||||
|
elif key in next_hour_sensors:
|
||||||
|
target_time = now + timedelta(hours=1)
|
||||||
|
interval_data = find_price_data_for_interval(coordinator.data, target_time, time=time)
|
||||||
|
# Override timestamp with the center of the next rolling hour window
|
||||||
|
if interval_data:
|
||||||
|
attributes["timestamp"] = interval_data["startsAt"]
|
||||||
|
elif key in current_hour_sensors:
|
||||||
|
current_interval_data = get_current_interval_data(coordinator, time=time)
|
||||||
|
# Keep default timestamp (when calculation was made) for current hour sensors
|
||||||
|
else:
|
||||||
|
current_interval_data = get_current_interval_data(coordinator, time=time)
|
||||||
|
interval_data = current_interval_data # Use current_interval_data as interval_data for current_interval_price
|
||||||
|
# Keep default timestamp (current calculation time) for current interval sensors
|
||||||
|
|
||||||
# Add icon_color for price sensors (based on their price level)
|
# Add icon_color for price sensors (based on their price level)
|
||||||
if key in [
|
if key in ["current_interval_price", "next_interval_price", "previous_interval_price"]:
|
||||||
"current_interval_price",
|
|
||||||
"current_interval_price_base",
|
|
||||||
"next_interval_price",
|
|
||||||
"previous_interval_price",
|
|
||||||
]:
|
|
||||||
# For interval-based price sensors, get level from interval_data
|
# For interval-based price sensors, get level from interval_data
|
||||||
if interval_data and "level" in interval_data:
|
if interval_data and "level" in interval_data:
|
||||||
level = interval_data["level"]
|
level = interval_data["level"]
|
||||||
|
|
@ -132,15 +108,6 @@ def add_current_interval_price_attributes( # noqa: PLR0913
|
||||||
if level:
|
if level:
|
||||||
add_icon_color_attribute(attributes, key="price_level", state_value=level)
|
add_icon_color_attribute(attributes, key="price_level", state_value=level)
|
||||||
|
|
||||||
# Add alternate average attribute for rolling hour average price sensors
|
|
||||||
base_key = "rolling_hour_0" if key == "current_hour_average_price" else "rolling_hour_1"
|
|
||||||
add_alternate_average_attribute(
|
|
||||||
attributes,
|
|
||||||
cached_data,
|
|
||||||
base_key,
|
|
||||||
config_entry=config_entry,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Add price level attributes for all level sensors
|
# Add price level attributes for all level sensors
|
||||||
add_level_attributes_for_sensor(
|
add_level_attributes_for_sensor(
|
||||||
attributes=attributes,
|
attributes=attributes,
|
||||||
|
|
|
||||||
|
|
@ -1,24 +1,4 @@
|
||||||
"""
|
"""Attribute builders for lifecycle diagnostic sensor."""
|
||||||
Attribute builders for lifecycle diagnostic sensor.
|
|
||||||
|
|
||||||
This sensor uses event-based updates with state-change filtering to minimize
|
|
||||||
recorder entries. Only attributes that are relevant to the lifecycle STATE
|
|
||||||
are included here - attributes that change independently of state belong
|
|
||||||
in a separate sensor or diagnostics.
|
|
||||||
|
|
||||||
Included attributes (update only on state change):
|
|
||||||
- tomorrow_available: Whether tomorrow's price data is available
|
|
||||||
- next_api_poll: When the next API poll will occur (builds user trust)
|
|
||||||
- updates_today: Number of API calls made today
|
|
||||||
- last_turnover: When the last midnight turnover occurred
|
|
||||||
- last_error: Details of the last error (if any)
|
|
||||||
|
|
||||||
Pool statistics (sensor_intervals_count, cache_fill_percent, etc.) are
|
|
||||||
intentionally NOT included here because they change independently of
|
|
||||||
the lifecycle state. With state-change filtering, these would become
|
|
||||||
stale. Pool statistics are available via diagnostics or could be
|
|
||||||
exposed as a separate sensor if needed.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
|
|
@ -33,6 +13,11 @@ if TYPE_CHECKING:
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# Constants for cache age formatting
|
||||||
|
MINUTES_PER_HOUR = 60
|
||||||
|
MINUTES_PER_DAY = 1440 # 24 * 60
|
||||||
|
|
||||||
|
|
||||||
def build_lifecycle_attributes(
|
def build_lifecycle_attributes(
|
||||||
coordinator: TibberPricesDataUpdateCoordinator,
|
coordinator: TibberPricesDataUpdateCoordinator,
|
||||||
lifecycle_calculator: TibberPricesLifecycleCalculator,
|
lifecycle_calculator: TibberPricesLifecycleCalculator,
|
||||||
|
|
@ -40,11 +25,7 @@ def build_lifecycle_attributes(
|
||||||
"""
|
"""
|
||||||
Build attributes for data_lifecycle_status sensor.
|
Build attributes for data_lifecycle_status sensor.
|
||||||
|
|
||||||
Event-based updates with state-change filtering - attributes only update
|
Shows comprehensive cache status, data availability, and update timing.
|
||||||
when the lifecycle STATE changes (fresh→cached, cached→turnover_pending, etc.).
|
|
||||||
|
|
||||||
Only includes attributes that are directly relevant to the lifecycle state.
|
|
||||||
Pool statistics are intentionally excluded to avoid stale data.
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Dict with lifecycle attributes
|
Dict with lifecycle attributes
|
||||||
|
|
@ -52,31 +33,57 @@ def build_lifecycle_attributes(
|
||||||
"""
|
"""
|
||||||
attributes: dict[str, Any] = {}
|
attributes: dict[str, Any] = {}
|
||||||
|
|
||||||
# === Tomorrow Data Status ===
|
# Cache Status (formatted for readability)
|
||||||
# Critical for understanding lifecycle state transitions
|
cache_age = lifecycle_calculator.get_cache_age_minutes()
|
||||||
attributes["tomorrow_available"] = lifecycle_calculator.has_tomorrow_data()
|
if cache_age is not None:
|
||||||
|
# Format cache age with units for better readability
|
||||||
|
if cache_age < MINUTES_PER_HOUR:
|
||||||
|
attributes["cache_age"] = f"{cache_age} min"
|
||||||
|
elif cache_age < MINUTES_PER_DAY: # Less than 24 hours
|
||||||
|
hours = cache_age // MINUTES_PER_HOUR
|
||||||
|
minutes = cache_age % MINUTES_PER_HOUR
|
||||||
|
attributes["cache_age"] = f"{hours}h {minutes}min" if minutes > 0 else f"{hours}h"
|
||||||
|
else: # 24+ hours
|
||||||
|
days = cache_age // MINUTES_PER_DAY
|
||||||
|
hours = (cache_age % MINUTES_PER_DAY) // MINUTES_PER_HOUR
|
||||||
|
attributes["cache_age"] = f"{days}d {hours}h" if hours > 0 else f"{days}d"
|
||||||
|
|
||||||
# === Next API Poll Time ===
|
# Keep raw value for automations
|
||||||
# Builds user trust: shows when the integration will check for tomorrow data
|
attributes["cache_age_minutes"] = cache_age
|
||||||
# - Before 13:00: Shows today 13:00 (when tomorrow-search begins)
|
|
||||||
# - After 13:00 without tomorrow data: Shows next Timer #1 execution (active polling)
|
cache_validity = lifecycle_calculator.get_cache_validity_status()
|
||||||
# - After 13:00 with tomorrow data: Shows tomorrow 13:00 (predictive)
|
attributes["cache_validity"] = cache_validity
|
||||||
|
|
||||||
|
if coordinator._last_price_update: # noqa: SLF001 - Internal state access for diagnostic display
|
||||||
|
attributes["last_api_fetch"] = coordinator._last_price_update.isoformat() # noqa: SLF001
|
||||||
|
attributes["last_cache_update"] = coordinator._last_price_update.isoformat() # noqa: SLF001
|
||||||
|
|
||||||
|
# Data Availability & Completeness
|
||||||
|
data_completeness = lifecycle_calculator.get_data_completeness_status()
|
||||||
|
attributes["data_completeness"] = data_completeness
|
||||||
|
|
||||||
|
attributes["yesterday_available"] = lifecycle_calculator.is_data_available(-1)
|
||||||
|
attributes["today_available"] = lifecycle_calculator.is_data_available(0)
|
||||||
|
attributes["tomorrow_available"] = lifecycle_calculator.is_data_available(1)
|
||||||
|
attributes["tomorrow_expected_after"] = "13:00"
|
||||||
|
|
||||||
|
# Next Actions (only show if meaningful)
|
||||||
next_poll = lifecycle_calculator.get_next_api_poll_time()
|
next_poll = lifecycle_calculator.get_next_api_poll_time()
|
||||||
if next_poll:
|
if next_poll: # None means data is complete, no more polls needed
|
||||||
attributes["next_api_poll"] = next_poll.isoformat()
|
attributes["next_api_poll"] = next_poll.isoformat()
|
||||||
|
|
||||||
# === Update Statistics ===
|
next_midnight = lifecycle_calculator.get_next_midnight_turnover_time()
|
||||||
# Shows API activity - resets at midnight with turnover
|
attributes["next_midnight_turnover"] = next_midnight.isoformat()
|
||||||
|
|
||||||
|
# Update Statistics
|
||||||
api_calls = lifecycle_calculator.get_api_calls_today()
|
api_calls = lifecycle_calculator.get_api_calls_today()
|
||||||
attributes["updates_today"] = api_calls
|
attributes["updates_today"] = api_calls
|
||||||
|
|
||||||
# === Midnight Turnover Info ===
|
# Last Turnover Time (from midnight handler)
|
||||||
# When was the last successful data rotation
|
if coordinator._midnight_handler.last_turnover_time: # noqa: SLF001 - Internal state access for diagnostic display
|
||||||
if coordinator._midnight_handler.last_turnover_time: # noqa: SLF001
|
|
||||||
attributes["last_turnover"] = coordinator._midnight_handler.last_turnover_time.isoformat() # noqa: SLF001
|
attributes["last_turnover"] = coordinator._midnight_handler.last_turnover_time.isoformat() # noqa: SLF001
|
||||||
|
|
||||||
# === Error Status ===
|
# Last Error (if any)
|
||||||
# Present only when there's an active error
|
|
||||||
if coordinator.last_exception:
|
if coordinator.last_exception:
|
||||||
attributes["last_error"] = str(coordinator.last_exception)
|
attributes["last_error"] = str(coordinator.last_exception)
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -13,17 +13,6 @@ if TYPE_CHECKING:
|
||||||
TIMER_30_SEC_BOUNDARY = 30
|
TIMER_30_SEC_BOUNDARY = 30
|
||||||
|
|
||||||
|
|
||||||
def _hours_to_minutes(state_value: Any) -> int | None:
|
|
||||||
"""Convert hour-based state back to rounded minutes for attributes."""
|
|
||||||
if state_value is None:
|
|
||||||
return None
|
|
||||||
|
|
||||||
try:
|
|
||||||
return round(float(state_value) * 60)
|
|
||||||
except (TypeError, ValueError):
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def _is_timing_or_volatility_sensor(key: str) -> bool:
|
def _is_timing_or_volatility_sensor(key: str) -> bool:
|
||||||
"""Check if sensor is a timing or volatility sensor."""
|
"""Check if sensor is a timing or volatility sensor."""
|
||||||
return key.endswith("_volatility") or (
|
return key.endswith("_volatility") or (
|
||||||
|
|
@ -80,16 +69,5 @@ def add_period_timing_attributes(
|
||||||
|
|
||||||
attributes["timestamp"] = timestamp
|
attributes["timestamp"] = timestamp
|
||||||
|
|
||||||
# Add minute-precision attributes for hour-based states to keep automation-friendly values
|
|
||||||
minute_value = _hours_to_minutes(state_value)
|
|
||||||
|
|
||||||
if minute_value is not None:
|
|
||||||
if key.endswith("period_duration"):
|
|
||||||
attributes["period_duration_minutes"] = minute_value
|
|
||||||
elif key.endswith("remaining_minutes"):
|
|
||||||
attributes["remaining_minutes"] = minute_value
|
|
||||||
elif key.endswith("next_in_minutes"):
|
|
||||||
attributes["next_in_minutes"] = minute_value
|
|
||||||
|
|
||||||
# Add icon_color for dynamic styling
|
# Add icon_color for dynamic styling
|
||||||
add_icon_color_attribute(attributes, key=key, state_value=state_value)
|
add_icon_color_attribute(attributes, key=key, state_value=state_value)
|
||||||
|
|
|
||||||
|
|
@ -153,11 +153,15 @@ def add_volatility_type_attributes(
|
||||||
|
|
||||||
if today_prices:
|
if today_prices:
|
||||||
today_vol = calculate_volatility_level(today_prices, **thresholds)
|
today_vol = calculate_volatility_level(today_prices, **thresholds)
|
||||||
|
today_spread = (max(today_prices) - min(today_prices)) * 100
|
||||||
|
volatility_attributes["today_spread"] = round(today_spread, 2)
|
||||||
volatility_attributes["today_volatility"] = today_vol
|
volatility_attributes["today_volatility"] = today_vol
|
||||||
volatility_attributes["interval_count_today"] = len(today_prices)
|
volatility_attributes["interval_count_today"] = len(today_prices)
|
||||||
|
|
||||||
if tomorrow_prices:
|
if tomorrow_prices:
|
||||||
tomorrow_vol = calculate_volatility_level(tomorrow_prices, **thresholds)
|
tomorrow_vol = calculate_volatility_level(tomorrow_prices, **thresholds)
|
||||||
|
tomorrow_spread = (max(tomorrow_prices) - min(tomorrow_prices)) * 100
|
||||||
|
volatility_attributes["tomorrow_spread"] = round(tomorrow_spread, 2)
|
||||||
volatility_attributes["tomorrow_volatility"] = tomorrow_vol
|
volatility_attributes["tomorrow_volatility"] = tomorrow_vol
|
||||||
volatility_attributes["interval_count_tomorrow"] = len(tomorrow_prices)
|
volatility_attributes["interval_count_tomorrow"] = len(tomorrow_prices)
|
||||||
elif volatility_type == "next_24h":
|
elif volatility_type == "next_24h":
|
||||||
|
|
|
||||||
|
|
@ -11,9 +11,6 @@ if TYPE_CHECKING:
|
||||||
TibberPricesDataUpdateCoordinator,
|
TibberPricesDataUpdateCoordinator,
|
||||||
)
|
)
|
||||||
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
||||||
from custom_components.tibber_prices.data import TibberPricesConfigEntry
|
|
||||||
|
|
||||||
from .helpers import add_alternate_average_attribute
|
|
||||||
|
|
||||||
|
|
||||||
def _update_extreme_interval(extreme_interval: dict | None, price_data: dict, key: str) -> dict:
|
def _update_extreme_interval(extreme_interval: dict | None, price_data: dict, key: str) -> dict:
|
||||||
|
|
@ -43,14 +40,12 @@ def _update_extreme_interval(extreme_interval: dict | None, price_data: dict, ke
|
||||||
return price_data if is_new_extreme else extreme_interval
|
return price_data if is_new_extreme else extreme_interval
|
||||||
|
|
||||||
|
|
||||||
def add_average_price_attributes( # noqa: PLR0913
|
def add_average_price_attributes(
|
||||||
attributes: dict,
|
attributes: dict,
|
||||||
key: str,
|
key: str,
|
||||||
coordinator: TibberPricesDataUpdateCoordinator,
|
coordinator: TibberPricesDataUpdateCoordinator,
|
||||||
*,
|
*,
|
||||||
time: TibberPricesTimeService,
|
time: TibberPricesTimeService,
|
||||||
cached_data: dict | None = None,
|
|
||||||
config_entry: TibberPricesConfigEntry | None = None,
|
|
||||||
) -> None:
|
) -> None:
|
||||||
"""
|
"""
|
||||||
Add attributes for trailing and leading average/min/max price sensors.
|
Add attributes for trailing and leading average/min/max price sensors.
|
||||||
|
|
@ -60,8 +55,6 @@ def add_average_price_attributes( # noqa: PLR0913
|
||||||
key: The sensor entity key
|
key: The sensor entity key
|
||||||
coordinator: The data update coordinator
|
coordinator: The data update coordinator
|
||||||
time: TibberPricesTimeService instance (required)
|
time: TibberPricesTimeService instance (required)
|
||||||
cached_data: Optional cached data dictionary for median values
|
|
||||||
config_entry: Optional config entry for user preferences
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
# Determine if this is trailing or leading
|
# Determine if this is trailing or leading
|
||||||
|
|
@ -105,13 +98,3 @@ def add_average_price_attributes( # noqa: PLR0913
|
||||||
attributes["timestamp"] = intervals_in_window[0].get("startsAt")
|
attributes["timestamp"] = intervals_in_window[0].get("startsAt")
|
||||||
|
|
||||||
attributes["interval_count"] = len(intervals_in_window)
|
attributes["interval_count"] = len(intervals_in_window)
|
||||||
|
|
||||||
# Add alternate average attribute for average sensors if available in cached_data
|
|
||||||
if cached_data and config_entry and "average" in key:
|
|
||||||
base_key = key.replace("_average", "")
|
|
||||||
add_alternate_average_attribute(
|
|
||||||
attributes,
|
|
||||||
cached_data,
|
|
||||||
base_key,
|
|
||||||
config_entry=config_entry,
|
|
||||||
)
|
|
||||||
|
|
|
||||||
|
|
@ -49,8 +49,8 @@ class TibberPricesDailyStatCalculator(TibberPricesBaseCalculator):
|
||||||
self,
|
self,
|
||||||
*,
|
*,
|
||||||
day: str = "today",
|
day: str = "today",
|
||||||
stat_func: Callable[[list[float]], float] | Callable[[list[float]], tuple[float, float | None]],
|
stat_func: Callable[[list[float]], float],
|
||||||
) -> float | tuple[float, float | None] | None:
|
) -> float | None:
|
||||||
"""
|
"""
|
||||||
Unified method for daily statistics (min/max/avg within calendar day).
|
Unified method for daily statistics (min/max/avg within calendar day).
|
||||||
|
|
||||||
|
|
@ -59,12 +59,10 @@ class TibberPricesDailyStatCalculator(TibberPricesBaseCalculator):
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
day: "today" or "tomorrow" - which calendar day to calculate for.
|
day: "today" or "tomorrow" - which calendar day to calculate for.
|
||||||
stat_func: Statistical function (min, max, or lambda for avg/median).
|
stat_func: Statistical function (min, max, or lambda for avg).
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Price value in subunit currency units (cents/øre), or None if unavailable.
|
Price value in minor currency units (cents/øre), or None if unavailable.
|
||||||
For average functions: tuple of (avg, median) where median may be None.
|
|
||||||
For min/max functions: single float value.
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
if not self.has_data():
|
if not self.has_data():
|
||||||
|
|
@ -99,25 +97,7 @@ class TibberPricesDailyStatCalculator(TibberPricesBaseCalculator):
|
||||||
|
|
||||||
# Find the extreme value and store its interval for later use in attributes
|
# Find the extreme value and store its interval for later use in attributes
|
||||||
prices = [pi["price"] for pi in price_intervals]
|
prices = [pi["price"] for pi in price_intervals]
|
||||||
result = stat_func(prices)
|
value = stat_func(prices)
|
||||||
|
|
||||||
# Check if result is a tuple (avg, median) from average functions
|
|
||||||
if isinstance(result, tuple):
|
|
||||||
value, median = result
|
|
||||||
# Store the interval (for avg, use first interval as reference)
|
|
||||||
if price_intervals:
|
|
||||||
self._last_extreme_interval = price_intervals[0]["interval"]
|
|
||||||
# Convert to display currency units based on config
|
|
||||||
avg_result = round(get_price_value(value, config_entry=self.coordinator.config_entry), 2)
|
|
||||||
median_result = (
|
|
||||||
round(get_price_value(median, config_entry=self.coordinator.config_entry), 2)
|
|
||||||
if median is not None
|
|
||||||
else None
|
|
||||||
)
|
|
||||||
return avg_result, median_result
|
|
||||||
|
|
||||||
# Single value result (min/max functions)
|
|
||||||
value = result
|
|
||||||
|
|
||||||
# Store the interval with the extreme price for use in attributes
|
# Store the interval with the extreme price for use in attributes
|
||||||
for pi in price_intervals:
|
for pi in price_intervals:
|
||||||
|
|
@ -125,8 +105,8 @@ class TibberPricesDailyStatCalculator(TibberPricesBaseCalculator):
|
||||||
self._last_extreme_interval = pi["interval"]
|
self._last_extreme_interval = pi["interval"]
|
||||||
break
|
break
|
||||||
|
|
||||||
# Return in configured display currency units with 2 decimals
|
# Always return in minor currency units (cents/øre) with 2 decimals
|
||||||
result = get_price_value(value, config_entry=self.coordinator.config_entry)
|
result = get_price_value(value, in_euro=False)
|
||||||
return round(result, 2)
|
return round(result, 2)
|
||||||
|
|
||||||
def get_daily_aggregated_value(
|
def get_daily_aggregated_value(
|
||||||
|
|
|
||||||
|
|
@ -4,8 +4,6 @@ from __future__ import annotations
|
||||||
|
|
||||||
from typing import TYPE_CHECKING
|
from typing import TYPE_CHECKING
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import get_display_unit_factor
|
|
||||||
|
|
||||||
from .base import TibberPricesBaseCalculator
|
from .base import TibberPricesBaseCalculator
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
|
|
@ -36,7 +34,7 @@ class TibberPricesIntervalCalculator(TibberPricesBaseCalculator):
|
||||||
self._last_rating_level: str | None = None
|
self._last_rating_level: str | None = None
|
||||||
self._last_rating_difference: float | None = None
|
self._last_rating_difference: float | None = None
|
||||||
|
|
||||||
def get_interval_value( # noqa: PLR0911
|
def get_interval_value(
|
||||||
self,
|
self,
|
||||||
*,
|
*,
|
||||||
interval_offset: int,
|
interval_offset: int,
|
||||||
|
|
@ -70,11 +68,7 @@ class TibberPricesIntervalCalculator(TibberPricesBaseCalculator):
|
||||||
if price is None:
|
if price is None:
|
||||||
return None
|
return None
|
||||||
price = float(price)
|
price = float(price)
|
||||||
# Return in base currency if in_euro=True, otherwise in display unit
|
return price if in_euro else round(price * 100, 2)
|
||||||
if in_euro:
|
|
||||||
return price
|
|
||||||
factor = get_display_unit_factor(self.config_entry)
|
|
||||||
return round(price * factor, 2)
|
|
||||||
|
|
||||||
if value_type == "level":
|
if value_type == "level":
|
||||||
level = self.safe_get_from_interval(interval_data, "level")
|
level = self.safe_get_from_interval(interval_data, "level")
|
||||||
|
|
|
||||||
|
|
@ -2,7 +2,11 @@
|
||||||
|
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
from datetime import datetime, timedelta
|
from datetime import timedelta
|
||||||
|
from typing import TYPE_CHECKING
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from datetime import datetime
|
||||||
|
|
||||||
from custom_components.tibber_prices.coordinator.constants import UPDATE_INTERVAL
|
from custom_components.tibber_prices.coordinator.constants import UPDATE_INTERVAL
|
||||||
|
|
||||||
|
|
@ -13,6 +17,10 @@ FRESH_DATA_THRESHOLD_MINUTES = 5 # Data is "fresh" within 5 minutes of API fetc
|
||||||
TOMORROW_CHECK_HOUR = 13 # After 13:00, we actively check for tomorrow data
|
TOMORROW_CHECK_HOUR = 13 # After 13:00, we actively check for tomorrow data
|
||||||
TURNOVER_WARNING_SECONDS = 900 # Warn 15 minutes before midnight (last quarter-hour: 23:45-00:00)
|
TURNOVER_WARNING_SECONDS = 900 # Warn 15 minutes before midnight (last quarter-hour: 23:45-00:00)
|
||||||
|
|
||||||
|
# Constants for 15-minute update boundaries (Timer #1)
|
||||||
|
QUARTER_HOUR_BOUNDARIES = [0, 15, 30, 45] # Minutes when Timer #1 can trigger
|
||||||
|
LAST_HOUR_OF_DAY = 23
|
||||||
|
|
||||||
|
|
||||||
class TibberPricesLifecycleCalculator(TibberPricesBaseCalculator):
|
class TibberPricesLifecycleCalculator(TibberPricesBaseCalculator):
|
||||||
"""Calculate data lifecycle status and metadata."""
|
"""Calculate data lifecycle status and metadata."""
|
||||||
|
|
@ -74,6 +82,15 @@ class TibberPricesLifecycleCalculator(TibberPricesBaseCalculator):
|
||||||
# Priority 6: Default - using cached data
|
# Priority 6: Default - using cached data
|
||||||
return "cached"
|
return "cached"
|
||||||
|
|
||||||
|
def get_cache_age_minutes(self) -> int | None:
|
||||||
|
"""Calculate how many minutes old the cached data is."""
|
||||||
|
coordinator = self.coordinator
|
||||||
|
if not coordinator._last_price_update: # noqa: SLF001 - Internal state access for lifecycle tracking
|
||||||
|
return None
|
||||||
|
|
||||||
|
age = coordinator.time.now() - coordinator._last_price_update # noqa: SLF001
|
||||||
|
return int(age.total_seconds() / 60)
|
||||||
|
|
||||||
def get_next_api_poll_time(self) -> datetime | None:
|
def get_next_api_poll_time(self) -> datetime | None:
|
||||||
"""
|
"""
|
||||||
Calculate when the next API poll attempt will occur.
|
Calculate when the next API poll attempt will occur.
|
||||||
|
|
@ -162,6 +179,117 @@ class TibberPricesLifecycleCalculator(TibberPricesBaseCalculator):
|
||||||
# Fallback: If we don't know timer offset yet, assume 13:00:00
|
# Fallback: If we don't know timer offset yet, assume 13:00:00
|
||||||
return tomorrow_13
|
return tomorrow_13
|
||||||
|
|
||||||
|
def get_next_midnight_turnover_time(self) -> datetime:
|
||||||
|
"""Calculate when the next midnight turnover will occur."""
|
||||||
|
coordinator = self.coordinator
|
||||||
|
current_time = coordinator.time.now()
|
||||||
|
now_local = coordinator.time.as_local(current_time)
|
||||||
|
|
||||||
|
# Next midnight
|
||||||
|
return now_local.replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(days=1)
|
||||||
|
|
||||||
|
def is_data_available(self, day_offset: int) -> bool:
|
||||||
|
"""
|
||||||
|
Check if data is available for a specific day.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
day_offset: Day offset (-1=yesterday, 0=today, 1=tomorrow)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if data exists and is not empty
|
||||||
|
|
||||||
|
"""
|
||||||
|
if not self.has_data():
|
||||||
|
return False
|
||||||
|
|
||||||
|
day_data = self.get_intervals(day_offset)
|
||||||
|
return bool(day_data)
|
||||||
|
|
||||||
|
def get_data_completeness_status(self) -> str:
|
||||||
|
"""
|
||||||
|
Get human-readable data completeness status.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
'complete': All data (yesterday/today/tomorrow) available
|
||||||
|
'missing_tomorrow': Only yesterday and today available
|
||||||
|
'missing_yesterday': Only today and tomorrow available
|
||||||
|
'partial': Only today or some other partial combination
|
||||||
|
'no_data': No data available at all
|
||||||
|
|
||||||
|
"""
|
||||||
|
yesterday_available = self.is_data_available(-1)
|
||||||
|
today_available = self.is_data_available(0)
|
||||||
|
tomorrow_available = self.is_data_available(1)
|
||||||
|
|
||||||
|
if yesterday_available and today_available and tomorrow_available:
|
||||||
|
return "complete"
|
||||||
|
if yesterday_available and today_available and not tomorrow_available:
|
||||||
|
return "missing_tomorrow"
|
||||||
|
if not yesterday_available and today_available and tomorrow_available:
|
||||||
|
return "missing_yesterday"
|
||||||
|
if today_available:
|
||||||
|
return "partial"
|
||||||
|
return "no_data"
|
||||||
|
|
||||||
|
def get_cache_validity_status(self) -> str:
|
||||||
|
"""
|
||||||
|
Get cache validity status.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
"valid": Cache is current and matches today's date
|
||||||
|
"stale": Cache exists but is outdated
|
||||||
|
"date_mismatch": Cache is from a different day
|
||||||
|
"empty": No cache data
|
||||||
|
|
||||||
|
"""
|
||||||
|
coordinator = self.coordinator
|
||||||
|
# Check if coordinator has data (transformed, ready for entities)
|
||||||
|
if not self.has_data():
|
||||||
|
return "empty"
|
||||||
|
|
||||||
|
# Check if we have price update timestamp
|
||||||
|
if not coordinator._last_price_update: # noqa: SLF001 - Internal state access for lifecycle tracking
|
||||||
|
return "empty"
|
||||||
|
|
||||||
|
current_time = coordinator.time.now()
|
||||||
|
current_local_date = coordinator.time.as_local(current_time).date()
|
||||||
|
last_update_local_date = coordinator.time.as_local(coordinator._last_price_update).date() # noqa: SLF001
|
||||||
|
|
||||||
|
if current_local_date != last_update_local_date:
|
||||||
|
return "date_mismatch"
|
||||||
|
|
||||||
|
# Check if cache is stale (older than expected)
|
||||||
|
# CRITICAL: After midnight turnover, _last_price_update is set to 00:00
|
||||||
|
# without new API data. The data is still valid (rotated yesterday→today).
|
||||||
|
#
|
||||||
|
# Cache is considered "valid" if EITHER:
|
||||||
|
# 1. Within normal update interval expectations (age ≤ 2 hours), OR
|
||||||
|
# 2. Coordinator update cycle ran recently (within last 30 minutes)
|
||||||
|
#
|
||||||
|
# Why check _last_coordinator_update?
|
||||||
|
# - After midnight turnover, _last_price_update stays at 00:00
|
||||||
|
# - But coordinator polls every 15 minutes and validates cache
|
||||||
|
# - If coordinator ran recently, cache was checked and deemed valid
|
||||||
|
# - This prevents false "stale" status when using rotated data
|
||||||
|
|
||||||
|
age = current_time - coordinator._last_price_update # noqa: SLF001
|
||||||
|
|
||||||
|
# If cache age is within normal expectations (≤2 hours), it's valid
|
||||||
|
if age <= timedelta(hours=2):
|
||||||
|
return "valid"
|
||||||
|
|
||||||
|
# Cache is older than 2 hours - check if coordinator validated it recently
|
||||||
|
# If coordinator ran within last 30 minutes, cache is considered current
|
||||||
|
# (even if _last_price_update is older, e.g., from midnight turnover)
|
||||||
|
if coordinator._last_coordinator_update: # noqa: SLF001 - Internal state access
|
||||||
|
time_since_coordinator_check = current_time - coordinator._last_coordinator_update # noqa: SLF001
|
||||||
|
if time_since_coordinator_check <= timedelta(minutes=30):
|
||||||
|
# Coordinator validated cache recently - it's current
|
||||||
|
return "valid"
|
||||||
|
|
||||||
|
# Cache is old AND coordinator hasn't validated recently - stale
|
||||||
|
return "stale"
|
||||||
|
|
||||||
def get_api_calls_today(self) -> int:
|
def get_api_calls_today(self) -> int:
|
||||||
"""Get the number of API calls made today."""
|
"""Get the number of API calls made today."""
|
||||||
coordinator = self.coordinator
|
coordinator = self.coordinator
|
||||||
|
|
@ -172,13 +300,3 @@ class TibberPricesLifecycleCalculator(TibberPricesBaseCalculator):
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
return coordinator._api_calls_today # noqa: SLF001
|
return coordinator._api_calls_today # noqa: SLF001
|
||||||
|
|
||||||
def has_tomorrow_data(self) -> bool:
|
|
||||||
"""
|
|
||||||
Check if tomorrow's price data is available.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
True if tomorrow data exists in the pool.
|
|
||||||
|
|
||||||
"""
|
|
||||||
return not self.coordinator._needs_tomorrow_data() # noqa: SLF001
|
|
||||||
|
|
|
||||||
|
|
@ -11,8 +11,8 @@ from custom_components.tibber_prices.const import (
|
||||||
from custom_components.tibber_prices.coordinator.helpers import get_intervals_for_day_offsets
|
from custom_components.tibber_prices.coordinator.helpers import get_intervals_for_day_offsets
|
||||||
from custom_components.tibber_prices.entity_utils import find_rolling_hour_center_index
|
from custom_components.tibber_prices.entity_utils import find_rolling_hour_center_index
|
||||||
from custom_components.tibber_prices.sensor.helpers import (
|
from custom_components.tibber_prices.sensor.helpers import (
|
||||||
aggregate_average_data,
|
|
||||||
aggregate_level_data,
|
aggregate_level_data,
|
||||||
|
aggregate_price_data,
|
||||||
aggregate_rating_data,
|
aggregate_rating_data,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -32,7 +32,7 @@ class TibberPricesRollingHourCalculator(TibberPricesBaseCalculator):
|
||||||
*,
|
*,
|
||||||
hour_offset: int = 0,
|
hour_offset: int = 0,
|
||||||
value_type: str = "price",
|
value_type: str = "price",
|
||||||
) -> str | float | tuple[float | None, float | None] | None:
|
) -> str | float | None:
|
||||||
"""
|
"""
|
||||||
Unified method to get aggregated values from 5-interval rolling window.
|
Unified method to get aggregated values from 5-interval rolling window.
|
||||||
|
|
||||||
|
|
@ -44,7 +44,7 @@ class TibberPricesRollingHourCalculator(TibberPricesBaseCalculator):
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Aggregated value based on type:
|
Aggregated value based on type:
|
||||||
- "price": float or tuple[float, float | None] (avg, median)
|
- "price": float (average price in minor currency units)
|
||||||
- "level": str (aggregated level: "very_cheap", "cheap", etc.)
|
- "level": str (aggregated level: "very_cheap", "cheap", etc.)
|
||||||
- "rating": str (aggregated rating: "low", "normal", "high")
|
- "rating": str (aggregated rating: "low", "normal", "high")
|
||||||
|
|
||||||
|
|
@ -81,7 +81,7 @@ class TibberPricesRollingHourCalculator(TibberPricesBaseCalculator):
|
||||||
self,
|
self,
|
||||||
window_data: list[dict],
|
window_data: list[dict],
|
||||||
value_type: str,
|
value_type: str,
|
||||||
) -> str | float | tuple[float | None, float | None] | None:
|
) -> str | float | None:
|
||||||
"""
|
"""
|
||||||
Aggregate data from multiple intervals based on value type.
|
Aggregate data from multiple intervals based on value type.
|
||||||
|
|
||||||
|
|
@ -90,10 +90,7 @@ class TibberPricesRollingHourCalculator(TibberPricesBaseCalculator):
|
||||||
value_type: "price" | "level" | "rating".
|
value_type: "price" | "level" | "rating".
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Aggregated value based on type:
|
Aggregated value based on type.
|
||||||
- "price": tuple[float, float | None] (avg, median)
|
|
||||||
- "level": str
|
|
||||||
- "rating": str
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
# Get thresholds from config for rating aggregation
|
# Get thresholds from config for rating aggregation
|
||||||
|
|
@ -106,12 +103,9 @@ class TibberPricesRollingHourCalculator(TibberPricesBaseCalculator):
|
||||||
DEFAULT_PRICE_RATING_THRESHOLD_HIGH,
|
DEFAULT_PRICE_RATING_THRESHOLD_HIGH,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Handle price aggregation - return tuple directly
|
# Map value types to aggregation functions
|
||||||
if value_type == "price":
|
|
||||||
return aggregate_average_data(window_data, self.config_entry)
|
|
||||||
|
|
||||||
# Map other value types to aggregation functions
|
|
||||||
aggregators = {
|
aggregators = {
|
||||||
|
"price": lambda data: aggregate_price_data(data),
|
||||||
"level": lambda data: aggregate_level_data(data),
|
"level": lambda data: aggregate_level_data(data),
|
||||||
"rating": lambda data: aggregate_rating_data(data, threshold_low, threshold_high),
|
"rating": lambda data: aggregate_rating_data(data, threshold_low, threshold_high),
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -15,9 +15,8 @@ Caching strategy:
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
from typing import TYPE_CHECKING, Any
|
from typing import TYPE_CHECKING, Any
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import get_display_unit_factor
|
|
||||||
from custom_components.tibber_prices.coordinator.helpers import get_intervals_for_day_offsets
|
from custom_components.tibber_prices.coordinator.helpers import get_intervals_for_day_offsets
|
||||||
from custom_components.tibber_prices.utils.average import calculate_mean, calculate_next_n_hours_mean
|
from custom_components.tibber_prices.utils.average import calculate_next_n_hours_avg
|
||||||
from custom_components.tibber_prices.utils.price import (
|
from custom_components.tibber_prices.utils.price import (
|
||||||
calculate_price_trend,
|
calculate_price_trend,
|
||||||
find_price_data_for_interval,
|
find_price_data_for_interval,
|
||||||
|
|
@ -97,16 +96,14 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
# Get next interval timestamp (basis for calculation)
|
# Get next interval timestamp (basis for calculation)
|
||||||
next_interval_start = time.get_next_interval_start()
|
next_interval_start = time.get_next_interval_start()
|
||||||
|
|
||||||
# Get future mean price (ignore median for trend calculation)
|
# Get future average price
|
||||||
future_mean, _ = calculate_next_n_hours_mean(self.coordinator.data, hours, time=self.coordinator.time)
|
future_avg = calculate_next_n_hours_avg(self.coordinator.data, hours, time=self.coordinator.time)
|
||||||
if future_mean is None:
|
if future_avg is None:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
# Get configured thresholds from options
|
# Get configured thresholds from options
|
||||||
threshold_rising = self.config.get("price_trend_threshold_rising", 5.0)
|
threshold_rising = self.config.get("price_trend_threshold_rising", 5.0)
|
||||||
threshold_falling = self.config.get("price_trend_threshold_falling", -5.0)
|
threshold_falling = self.config.get("price_trend_threshold_falling", -5.0)
|
||||||
threshold_strongly_rising = self.config.get("price_trend_threshold_strongly_rising", 6.0)
|
|
||||||
threshold_strongly_falling = self.config.get("price_trend_threshold_strongly_falling", -6.0)
|
|
||||||
volatility_threshold_moderate = self.config.get("volatility_threshold_moderate", 15.0)
|
volatility_threshold_moderate = self.config.get("volatility_threshold_moderate", 15.0)
|
||||||
volatility_threshold_high = self.config.get("volatility_threshold_high", 30.0)
|
volatility_threshold_high = self.config.get("volatility_threshold_high", 30.0)
|
||||||
|
|
||||||
|
|
@ -117,13 +114,11 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
lookahead_intervals = self.coordinator.time.minutes_to_intervals(hours * 60)
|
lookahead_intervals = self.coordinator.time.minutes_to_intervals(hours * 60)
|
||||||
|
|
||||||
# Calculate trend with volatility-adaptive thresholds
|
# Calculate trend with volatility-adaptive thresholds
|
||||||
trend_state, diff_pct, trend_value = calculate_price_trend(
|
trend_state, diff_pct = calculate_price_trend(
|
||||||
current_interval_price,
|
current_interval_price,
|
||||||
future_mean,
|
future_avg,
|
||||||
threshold_rising=threshold_rising,
|
threshold_rising=threshold_rising,
|
||||||
threshold_falling=threshold_falling,
|
threshold_falling=threshold_falling,
|
||||||
threshold_strongly_rising=threshold_strongly_rising,
|
|
||||||
threshold_strongly_falling=threshold_strongly_falling,
|
|
||||||
volatility_adjustment=True, # Always enabled
|
volatility_adjustment=True, # Always enabled
|
||||||
lookahead_intervals=lookahead_intervals,
|
lookahead_intervals=lookahead_intervals,
|
||||||
all_intervals=all_intervals,
|
all_intervals=all_intervals,
|
||||||
|
|
@ -131,25 +126,18 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
volatility_threshold_high=volatility_threshold_high,
|
volatility_threshold_high=volatility_threshold_high,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Determine icon color based on trend state (5-level scale)
|
# Determine icon color based on trend state
|
||||||
# Strongly rising/falling uses more intense colors
|
|
||||||
icon_color = {
|
icon_color = {
|
||||||
"strongly_rising": "var(--error-color)", # Red for strongly rising (very expensive)
|
"rising": "var(--error-color)", # Red/Orange for rising prices (expensive)
|
||||||
"rising": "var(--warning-color)", # Orange/Yellow for rising prices
|
|
||||||
"stable": "var(--state-icon-color)", # Default gray for stable prices
|
|
||||||
"falling": "var(--success-color)", # Green for falling prices (cheaper)
|
"falling": "var(--success-color)", # Green for falling prices (cheaper)
|
||||||
"strongly_falling": "var(--success-color)", # Green for strongly falling (great deal)
|
"stable": "var(--state-icon-color)", # Default gray for stable prices
|
||||||
}.get(trend_state, "var(--state-icon-color)")
|
}.get(trend_state, "var(--state-icon-color)")
|
||||||
|
|
||||||
# Convert prices to display currency unit based on configuration
|
|
||||||
factor = get_display_unit_factor(self.config_entry)
|
|
||||||
|
|
||||||
# Store attributes in sensor-specific dictionary AND cache the trend value
|
# Store attributes in sensor-specific dictionary AND cache the trend value
|
||||||
self._trend_attributes = {
|
self._trend_attributes = {
|
||||||
"timestamp": next_interval_start,
|
"timestamp": next_interval_start,
|
||||||
"trend_value": trend_value,
|
|
||||||
f"trend_{hours}h_%": round(diff_pct, 1),
|
f"trend_{hours}h_%": round(diff_pct, 1),
|
||||||
f"next_{hours}h_avg": round(future_mean * factor, 2),
|
f"next_{hours}h_avg": round(future_avg * 100, 2),
|
||||||
"interval_count": lookahead_intervals,
|
"interval_count": lookahead_intervals,
|
||||||
"threshold_rising": threshold_rising,
|
"threshold_rising": threshold_rising,
|
||||||
"threshold_falling": threshold_falling,
|
"threshold_falling": threshold_falling,
|
||||||
|
|
@ -161,7 +149,7 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
# Get second half average for longer periods
|
# Get second half average for longer periods
|
||||||
later_half_avg = self._calculate_later_half_average(hours, next_interval_start)
|
later_half_avg = self._calculate_later_half_average(hours, next_interval_start)
|
||||||
if later_half_avg is not None:
|
if later_half_avg is not None:
|
||||||
self._trend_attributes[f"second_half_{hours}h_avg"] = round(later_half_avg * factor, 2)
|
self._trend_attributes[f"second_half_{hours}h_avg"] = round(later_half_avg * 100, 2)
|
||||||
|
|
||||||
# Calculate incremental change: how much does the later half differ from current?
|
# Calculate incremental change: how much does the later half differ from current?
|
||||||
# CRITICAL: Use abs() for negative prices and allow calculation for all non-zero prices
|
# CRITICAL: Use abs() for negative prices and allow calculation for all non-zero prices
|
||||||
|
|
@ -290,7 +278,7 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
later_prices.append(float(price))
|
later_prices.append(float(price))
|
||||||
|
|
||||||
if later_prices:
|
if later_prices:
|
||||||
return calculate_mean(later_prices)
|
return sum(later_prices) / len(later_prices)
|
||||||
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
@ -357,11 +345,11 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
|
|
||||||
# Combine momentum + future outlook to get ACTUAL current trend
|
# Combine momentum + future outlook to get ACTUAL current trend
|
||||||
if len(future_intervals) >= min_intervals_for_trend and future_prices:
|
if len(future_intervals) >= min_intervals_for_trend and future_prices:
|
||||||
future_mean = calculate_mean(future_prices)
|
future_avg = sum(future_prices) / len(future_prices)
|
||||||
current_trend_state = self._combine_momentum_with_future(
|
current_trend_state = self._combine_momentum_with_future(
|
||||||
current_momentum=current_momentum,
|
current_momentum=current_momentum,
|
||||||
current_price=current_price,
|
current_price=current_price,
|
||||||
future_mean=future_mean,
|
future_avg=future_avg,
|
||||||
context={
|
context={
|
||||||
"all_intervals": all_intervals,
|
"all_intervals": all_intervals,
|
||||||
"current_index": current_index,
|
"current_index": current_index,
|
||||||
|
|
@ -422,8 +410,6 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
return {
|
return {
|
||||||
"rising": self.config.get("price_trend_threshold_rising", 5.0),
|
"rising": self.config.get("price_trend_threshold_rising", 5.0),
|
||||||
"falling": self.config.get("price_trend_threshold_falling", -5.0),
|
"falling": self.config.get("price_trend_threshold_falling", -5.0),
|
||||||
"strongly_rising": self.config.get("price_trend_threshold_strongly_rising", 6.0),
|
|
||||||
"strongly_falling": self.config.get("price_trend_threshold_strongly_falling", -6.0),
|
|
||||||
"moderate": self.config.get("volatility_threshold_moderate", 15.0),
|
"moderate": self.config.get("volatility_threshold_moderate", 15.0),
|
||||||
"high": self.config.get("volatility_threshold_high", 30.0),
|
"high": self.config.get("volatility_threshold_high", 30.0),
|
||||||
}
|
}
|
||||||
|
|
@ -438,7 +424,7 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
current_index: Index of current interval
|
current_index: Index of current interval
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Momentum direction: "strongly_rising", "rising", "stable", "falling", or "strongly_falling"
|
Momentum direction: "rising", "falling", or "stable"
|
||||||
|
|
||||||
"""
|
"""
|
||||||
# Look back 1 hour (4 intervals) for quick reaction
|
# Look back 1 hour (4 intervals) for quick reaction
|
||||||
|
|
@ -461,91 +447,64 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
weighted_sum = sum(price * weight for price, weight in zip(trailing_prices, weights, strict=True))
|
weighted_sum = sum(price * weight for price, weight in zip(trailing_prices, weights, strict=True))
|
||||||
weighted_avg = weighted_sum / sum(weights)
|
weighted_avg = weighted_sum / sum(weights)
|
||||||
|
|
||||||
# Calculate momentum with thresholds
|
# Calculate momentum with 3% threshold
|
||||||
# Using same logic as 5-level trend: 3% for normal, 6% (2x) for strong
|
|
||||||
momentum_threshold = 0.03
|
momentum_threshold = 0.03
|
||||||
strong_momentum_threshold = 0.06
|
diff = (current_price - weighted_avg) / weighted_avg
|
||||||
diff = (current_price - weighted_avg) / abs(weighted_avg) if weighted_avg != 0 else 0
|
|
||||||
|
|
||||||
# Determine momentum level based on thresholds
|
if diff > momentum_threshold:
|
||||||
if diff >= strong_momentum_threshold:
|
return "rising"
|
||||||
momentum = "strongly_rising"
|
if diff < -momentum_threshold:
|
||||||
elif diff > momentum_threshold:
|
return "falling"
|
||||||
momentum = "rising"
|
return "stable"
|
||||||
elif diff <= -strong_momentum_threshold:
|
|
||||||
momentum = "strongly_falling"
|
|
||||||
elif diff < -momentum_threshold:
|
|
||||||
momentum = "falling"
|
|
||||||
else:
|
|
||||||
momentum = "stable"
|
|
||||||
|
|
||||||
return momentum
|
|
||||||
|
|
||||||
def _combine_momentum_with_future(
|
def _combine_momentum_with_future(
|
||||||
self,
|
self,
|
||||||
*,
|
*,
|
||||||
current_momentum: str,
|
current_momentum: str,
|
||||||
current_price: float,
|
current_price: float,
|
||||||
future_mean: float,
|
future_avg: float,
|
||||||
context: dict,
|
context: dict,
|
||||||
) -> str:
|
) -> str:
|
||||||
"""
|
"""
|
||||||
Combine momentum analysis with future outlook to determine final trend.
|
Combine momentum analysis with future outlook to determine final trend.
|
||||||
|
|
||||||
Uses 5-level scale: strongly_rising, rising, stable, falling, strongly_falling.
|
|
||||||
Momentum intensity is preserved when future confirms the trend direction.
|
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
current_momentum: Current momentum direction (5-level scale)
|
current_momentum: Current momentum direction (rising/falling/stable)
|
||||||
current_price: Current interval price
|
current_price: Current interval price
|
||||||
future_mean: Average price in future window
|
future_avg: Average price in future window
|
||||||
context: Dict with all_intervals, current_index, lookahead_intervals, thresholds
|
context: Dict with all_intervals, current_index, lookahead_intervals, thresholds
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Final trend direction (5-level scale)
|
Final trend direction: "rising", "falling", or "stable"
|
||||||
|
|
||||||
"""
|
"""
|
||||||
# Use calculate_price_trend for consistency with 5-level logic
|
if current_momentum == "rising":
|
||||||
|
# We're in uptrend - does it continue?
|
||||||
|
return "rising" if future_avg >= current_price * 0.98 else "falling"
|
||||||
|
|
||||||
|
if current_momentum == "falling":
|
||||||
|
# We're in downtrend - does it continue?
|
||||||
|
return "falling" if future_avg <= current_price * 1.02 else "rising"
|
||||||
|
|
||||||
|
# current_momentum == "stable" - what's coming?
|
||||||
all_intervals = context["all_intervals"]
|
all_intervals = context["all_intervals"]
|
||||||
current_index = context["current_index"]
|
current_index = context["current_index"]
|
||||||
lookahead_intervals = context["lookahead_intervals"]
|
lookahead_intervals = context["lookahead_intervals"]
|
||||||
thresholds = context["thresholds"]
|
thresholds = context["thresholds"]
|
||||||
|
|
||||||
lookahead_for_volatility = all_intervals[current_index : current_index + lookahead_intervals]
|
lookahead_for_volatility = all_intervals[current_index : current_index + lookahead_intervals]
|
||||||
future_trend, _, _ = calculate_price_trend(
|
trend_state, _ = calculate_price_trend(
|
||||||
current_price,
|
current_price,
|
||||||
future_mean,
|
future_avg,
|
||||||
threshold_rising=thresholds["rising"],
|
threshold_rising=thresholds["rising"],
|
||||||
threshold_falling=thresholds["falling"],
|
threshold_falling=thresholds["falling"],
|
||||||
threshold_strongly_rising=thresholds["strongly_rising"],
|
|
||||||
threshold_strongly_falling=thresholds["strongly_falling"],
|
|
||||||
volatility_adjustment=True,
|
volatility_adjustment=True,
|
||||||
lookahead_intervals=lookahead_intervals,
|
lookahead_intervals=lookahead_intervals,
|
||||||
all_intervals=lookahead_for_volatility,
|
all_intervals=lookahead_for_volatility,
|
||||||
volatility_threshold_moderate=thresholds["moderate"],
|
volatility_threshold_moderate=thresholds["moderate"],
|
||||||
volatility_threshold_high=thresholds["high"],
|
volatility_threshold_high=thresholds["high"],
|
||||||
)
|
)
|
||||||
|
return trend_state
|
||||||
# Check if momentum and future trend are aligned (same direction)
|
|
||||||
momentum_rising = current_momentum in ("rising", "strongly_rising")
|
|
||||||
momentum_falling = current_momentum in ("falling", "strongly_falling")
|
|
||||||
future_rising = future_trend in ("rising", "strongly_rising")
|
|
||||||
future_falling = future_trend in ("falling", "strongly_falling")
|
|
||||||
|
|
||||||
if momentum_rising and future_rising:
|
|
||||||
# Both indicate rising - use the stronger signal
|
|
||||||
if current_momentum == "strongly_rising" or future_trend == "strongly_rising":
|
|
||||||
return "strongly_rising"
|
|
||||||
return "rising"
|
|
||||||
|
|
||||||
if momentum_falling and future_falling:
|
|
||||||
# Both indicate falling - use the stronger signal
|
|
||||||
if current_momentum == "strongly_falling" or future_trend == "strongly_falling":
|
|
||||||
return "strongly_falling"
|
|
||||||
return "falling"
|
|
||||||
|
|
||||||
# Conflicting signals or stable momentum - trust future trend calculation
|
|
||||||
return future_trend
|
|
||||||
|
|
||||||
def _calculate_standard_trend(
|
def _calculate_standard_trend(
|
||||||
self,
|
self,
|
||||||
|
|
@ -567,17 +526,15 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
if not standard_future_prices:
|
if not standard_future_prices:
|
||||||
return "stable"
|
return "stable"
|
||||||
|
|
||||||
standard_future_mean = calculate_mean(standard_future_prices)
|
standard_future_avg = sum(standard_future_prices) / len(standard_future_prices)
|
||||||
current_price = float(current_interval["total"])
|
current_price = float(current_interval["total"])
|
||||||
|
|
||||||
standard_lookahead_volatility = all_intervals[current_index : current_index + standard_lookahead]
|
standard_lookahead_volatility = all_intervals[current_index : current_index + standard_lookahead]
|
||||||
current_trend_3h, _, _ = calculate_price_trend(
|
current_trend_3h, _ = calculate_price_trend(
|
||||||
current_price,
|
current_price,
|
||||||
standard_future_mean,
|
standard_future_avg,
|
||||||
threshold_rising=thresholds["rising"],
|
threshold_rising=thresholds["rising"],
|
||||||
threshold_falling=thresholds["falling"],
|
threshold_falling=thresholds["falling"],
|
||||||
threshold_strongly_rising=thresholds["strongly_rising"],
|
|
||||||
threshold_strongly_falling=thresholds["strongly_falling"],
|
|
||||||
volatility_adjustment=True,
|
volatility_adjustment=True,
|
||||||
lookahead_intervals=standard_lookahead,
|
lookahead_intervals=standard_lookahead,
|
||||||
all_intervals=standard_lookahead_volatility,
|
all_intervals=standard_lookahead_volatility,
|
||||||
|
|
@ -640,18 +597,16 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
if not future_prices:
|
if not future_prices:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
future_mean = calculate_mean(future_prices)
|
future_avg = sum(future_prices) / len(future_prices)
|
||||||
price = float(interval["total"])
|
price = float(interval["total"])
|
||||||
|
|
||||||
# Calculate trend at this past point
|
# Calculate trend at this past point
|
||||||
lookahead_for_volatility = all_intervals[i : i + intervals_in_3h]
|
lookahead_for_volatility = all_intervals[i : i + intervals_in_3h]
|
||||||
trend_state, _, _ = calculate_price_trend(
|
trend_state, _ = calculate_price_trend(
|
||||||
price,
|
price,
|
||||||
future_mean,
|
future_avg,
|
||||||
threshold_rising=thresholds["rising"],
|
threshold_rising=thresholds["rising"],
|
||||||
threshold_falling=thresholds["falling"],
|
threshold_falling=thresholds["falling"],
|
||||||
threshold_strongly_rising=thresholds["strongly_rising"],
|
|
||||||
threshold_strongly_falling=thresholds["strongly_falling"],
|
|
||||||
volatility_adjustment=True,
|
volatility_adjustment=True,
|
||||||
lookahead_intervals=intervals_in_3h,
|
lookahead_intervals=intervals_in_3h,
|
||||||
all_intervals=lookahead_for_volatility,
|
all_intervals=lookahead_for_volatility,
|
||||||
|
|
@ -714,18 +669,16 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
if not future_prices:
|
if not future_prices:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
future_mean = calculate_mean(future_prices)
|
future_avg = sum(future_prices) / len(future_prices)
|
||||||
current_price = float(interval["total"])
|
current_price = float(interval["total"])
|
||||||
|
|
||||||
# Calculate trend at this future point
|
# Calculate trend at this future point
|
||||||
lookahead_for_volatility = all_intervals[i : i + intervals_in_3h]
|
lookahead_for_volatility = all_intervals[i : i + intervals_in_3h]
|
||||||
trend_state, _, _ = calculate_price_trend(
|
trend_state, _ = calculate_price_trend(
|
||||||
current_price,
|
current_price,
|
||||||
future_mean,
|
future_avg,
|
||||||
threshold_rising=thresholds["rising"],
|
threshold_rising=thresholds["rising"],
|
||||||
threshold_falling=thresholds["falling"],
|
threshold_falling=thresholds["falling"],
|
||||||
threshold_strongly_rising=thresholds["strongly_rising"],
|
|
||||||
threshold_strongly_falling=thresholds["strongly_falling"],
|
|
||||||
volatility_adjustment=True,
|
volatility_adjustment=True,
|
||||||
lookahead_intervals=intervals_in_3h,
|
lookahead_intervals=intervals_in_3h,
|
||||||
all_intervals=lookahead_for_volatility,
|
all_intervals=lookahead_for_volatility,
|
||||||
|
|
@ -740,17 +693,14 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
||||||
time = self.coordinator.time
|
time = self.coordinator.time
|
||||||
minutes_until = int(time.minutes_until(interval_start))
|
minutes_until = int(time.minutes_until(interval_start))
|
||||||
|
|
||||||
# Convert prices to display currency unit
|
|
||||||
factor = get_display_unit_factor(self.config_entry)
|
|
||||||
|
|
||||||
self._trend_change_attributes = {
|
self._trend_change_attributes = {
|
||||||
"direction": trend_state,
|
"direction": trend_state,
|
||||||
"from_direction": current_trend_state,
|
"from_direction": current_trend_state,
|
||||||
"minutes_until_change": minutes_until,
|
"minutes_until_change": minutes_until,
|
||||||
"current_price_now": round(float(current_interval["total"]) * factor, 2),
|
"current_price_now": round(float(current_interval["total"]) * 100, 2),
|
||||||
"price_at_change": round(current_price * factor, 2),
|
"price_at_change": round(current_price * 100, 2),
|
||||||
"avg_after_change": round(future_mean * factor, 2),
|
"avg_after_change": round(future_avg * 100, 2),
|
||||||
"trend_diff_%": round((future_mean - current_price) / current_price * 100, 1),
|
"trend_diff_%": round((future_avg - current_price) / current_price * 100, 1),
|
||||||
}
|
}
|
||||||
return interval_start
|
return interval_start
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -4,22 +4,12 @@ from __future__ import annotations
|
||||||
|
|
||||||
from typing import TYPE_CHECKING
|
from typing import TYPE_CHECKING
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import (
|
|
||||||
CONF_VOLATILITY_THRESHOLD_HIGH,
|
|
||||||
CONF_VOLATILITY_THRESHOLD_MODERATE,
|
|
||||||
CONF_VOLATILITY_THRESHOLD_VERY_HIGH,
|
|
||||||
DEFAULT_VOLATILITY_THRESHOLD_HIGH,
|
|
||||||
DEFAULT_VOLATILITY_THRESHOLD_MODERATE,
|
|
||||||
DEFAULT_VOLATILITY_THRESHOLD_VERY_HIGH,
|
|
||||||
get_display_unit_factor,
|
|
||||||
)
|
|
||||||
from custom_components.tibber_prices.entity_utils import add_icon_color_attribute
|
from custom_components.tibber_prices.entity_utils import add_icon_color_attribute
|
||||||
from custom_components.tibber_prices.sensor.attributes import (
|
from custom_components.tibber_prices.sensor.attributes import (
|
||||||
add_volatility_type_attributes,
|
add_volatility_type_attributes,
|
||||||
get_prices_for_volatility,
|
get_prices_for_volatility,
|
||||||
)
|
)
|
||||||
from custom_components.tibber_prices.utils.average import calculate_mean
|
from custom_components.tibber_prices.utils.price import calculate_volatility_level
|
||||||
from custom_components.tibber_prices.utils.price import calculate_volatility_with_cv
|
|
||||||
|
|
||||||
from .base import TibberPricesBaseCalculator
|
from .base import TibberPricesBaseCalculator
|
||||||
|
|
||||||
|
|
@ -66,22 +56,14 @@ class TibberPricesVolatilityCalculator(TibberPricesBaseCalculator):
|
||||||
|
|
||||||
# Get volatility thresholds from config
|
# Get volatility thresholds from config
|
||||||
thresholds = {
|
thresholds = {
|
||||||
"threshold_moderate": self.config.get(
|
"threshold_moderate": self.config.get("volatility_threshold_moderate", 5.0),
|
||||||
CONF_VOLATILITY_THRESHOLD_MODERATE,
|
"threshold_high": self.config.get("volatility_threshold_high", 15.0),
|
||||||
DEFAULT_VOLATILITY_THRESHOLD_MODERATE,
|
"threshold_very_high": self.config.get("volatility_threshold_very_high", 30.0),
|
||||||
),
|
|
||||||
"threshold_high": self.config.get(CONF_VOLATILITY_THRESHOLD_HIGH, DEFAULT_VOLATILITY_THRESHOLD_HIGH),
|
|
||||||
"threshold_very_high": self.config.get(
|
|
||||||
CONF_VOLATILITY_THRESHOLD_VERY_HIGH,
|
|
||||||
DEFAULT_VOLATILITY_THRESHOLD_VERY_HIGH,
|
|
||||||
),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# Get prices based on volatility type
|
# Get prices based on volatility type
|
||||||
prices_to_analyze = get_prices_for_volatility(
|
prices_to_analyze = get_prices_for_volatility(
|
||||||
volatility_type,
|
volatility_type, self.coordinator.data, time=self.coordinator.time
|
||||||
self.coordinator.data,
|
|
||||||
time=self.coordinator.time,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
if not prices_to_analyze:
|
if not prices_to_analyze:
|
||||||
|
|
@ -91,24 +73,21 @@ class TibberPricesVolatilityCalculator(TibberPricesBaseCalculator):
|
||||||
price_min = min(prices_to_analyze)
|
price_min = min(prices_to_analyze)
|
||||||
price_max = max(prices_to_analyze)
|
price_max = max(prices_to_analyze)
|
||||||
spread = price_max - price_min
|
spread = price_max - price_min
|
||||||
# Use arithmetic mean for volatility calculation (required for coefficient of variation)
|
price_avg = sum(prices_to_analyze) / len(prices_to_analyze)
|
||||||
price_mean = calculate_mean(prices_to_analyze)
|
|
||||||
|
|
||||||
# Convert to display currency unit based on configuration
|
# Convert to minor currency units (ct/øre) for display
|
||||||
factor = get_display_unit_factor(self.config_entry)
|
spread_minor = spread * 100
|
||||||
spread_display = spread * factor
|
|
||||||
|
|
||||||
# Calculate volatility level AND coefficient of variation
|
# Calculate volatility level with custom thresholds (pass price list, not spread)
|
||||||
volatility, cv = calculate_volatility_with_cv(prices_to_analyze, **thresholds)
|
volatility = calculate_volatility_level(prices_to_analyze, **thresholds)
|
||||||
|
|
||||||
# Store attributes for this sensor
|
# Store attributes for this sensor
|
||||||
self._last_volatility_attributes = {
|
self._last_volatility_attributes = {
|
||||||
"price_spread": round(spread_display, 2),
|
"price_spread": round(spread_minor, 2),
|
||||||
"price_coefficient_variation_%": round(cv, 2) if cv is not None else None,
|
"price_volatility": volatility,
|
||||||
"price_volatility": volatility.lower(),
|
"price_min": round(price_min * 100, 2),
|
||||||
"price_min": round(price_min * factor, 2),
|
"price_max": round(price_max * 100, 2),
|
||||||
"price_max": round(price_max * factor, 2),
|
"price_avg": round(price_avg * 100, 2),
|
||||||
"price_mean": round(price_mean * factor, 2),
|
|
||||||
"interval_count": len(prices_to_analyze),
|
"interval_count": len(prices_to_analyze),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -24,7 +24,7 @@ class TibberPricesWindow24hCalculator(TibberPricesBaseCalculator):
|
||||||
self,
|
self,
|
||||||
*,
|
*,
|
||||||
stat_func: Callable,
|
stat_func: Callable,
|
||||||
) -> float | tuple[float, float | None] | None:
|
) -> float | None:
|
||||||
"""
|
"""
|
||||||
Unified method for 24-hour sliding window statistics.
|
Unified method for 24-hour sliding window statistics.
|
||||||
|
|
||||||
|
|
@ -33,38 +33,20 @@ class TibberPricesWindow24hCalculator(TibberPricesBaseCalculator):
|
||||||
- "leading": Next 24 hours (96 intervals after current)
|
- "leading": Next 24 hours (96 intervals after current)
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
stat_func: Function from average_utils (e.g., calculate_current_trailing_mean).
|
stat_func: Function from average_utils (e.g., calculate_current_trailing_avg).
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Price value in subunit currency units (cents/øre), or None if unavailable.
|
Price value in minor currency units (cents/øre), or None if unavailable.
|
||||||
For mean functions: tuple of (mean, median) where median may be None.
|
|
||||||
For min/max functions: single float value.
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
if not self.has_data():
|
if not self.has_data():
|
||||||
return None
|
return None
|
||||||
|
|
||||||
result = stat_func(self.coordinator_data, time=self.coordinator.time)
|
value = stat_func(self.coordinator_data, time=self.coordinator.time)
|
||||||
|
|
||||||
# Check if result is a tuple (mean, median) from mean functions
|
|
||||||
if isinstance(result, tuple):
|
|
||||||
value, median = result
|
|
||||||
if value is None:
|
|
||||||
return None
|
|
||||||
# Convert to display currency units based on config
|
|
||||||
mean_result = round(get_price_value(value, config_entry=self.coordinator.config_entry), 2)
|
|
||||||
median_result = (
|
|
||||||
round(get_price_value(median, config_entry=self.coordinator.config_entry), 2)
|
|
||||||
if median is not None
|
|
||||||
else None
|
|
||||||
)
|
|
||||||
return mean_result, median_result
|
|
||||||
|
|
||||||
# Single value result (min/max functions)
|
|
||||||
value = result
|
|
||||||
if value is None:
|
if value is None:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
# Return in configured display currency units with 2 decimals
|
# Always return in minor currency units (cents/øre) with 2 decimals
|
||||||
result = get_price_value(value, config_entry=self.coordinator.config_entry)
|
result = get_price_value(value, in_euro=False)
|
||||||
return round(result, 2)
|
return round(result, 2)
|
||||||
|
|
|
||||||
|
|
@ -38,9 +38,6 @@ async def call_chartdata_service_async(
|
||||||
# Add required entry_id parameter
|
# Add required entry_id parameter
|
||||||
service_params["entry_id"] = config_entry.entry_id
|
service_params["entry_id"] = config_entry.entry_id
|
||||||
|
|
||||||
# Make sure metadata is never requested for this sensor
|
|
||||||
service_params["metadata"] = "none"
|
|
||||||
|
|
||||||
# Call get_chartdata service using official HA service system
|
# Call get_chartdata service using official HA service system
|
||||||
try:
|
try:
|
||||||
response = await hass.services.async_call(
|
response = await hass.services.async_call(
|
||||||
|
|
|
||||||
|
|
@ -1,149 +0,0 @@
|
||||||
"""Chart metadata export functionality for Tibber Prices sensors."""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
from typing import TYPE_CHECKING
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import (
|
|
||||||
CONF_CURRENCY_DISPLAY_MODE,
|
|
||||||
DATA_CHART_METADATA_CONFIG,
|
|
||||||
DISPLAY_MODE_SUBUNIT,
|
|
||||||
DOMAIN,
|
|
||||||
)
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
from datetime import datetime
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.coordinator import TibberPricesDataUpdateCoordinator
|
|
||||||
from custom_components.tibber_prices.data import TibberPricesConfigEntry
|
|
||||||
from homeassistant.core import HomeAssistant
|
|
||||||
|
|
||||||
|
|
||||||
async def call_chartdata_service_for_metadata_async(
|
|
||||||
hass: HomeAssistant,
|
|
||||||
coordinator: TibberPricesDataUpdateCoordinator,
|
|
||||||
config_entry: TibberPricesConfigEntry,
|
|
||||||
) -> tuple[dict | None, str | None]:
|
|
||||||
"""
|
|
||||||
Call get_chartdata service with configuration from configuration.yaml for metadata (async).
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Tuple of (response, error_message).
|
|
||||||
If successful: (response_dict, None)
|
|
||||||
If failed: (None, error_string)
|
|
||||||
|
|
||||||
"""
|
|
||||||
# Get configuration from hass.data (loaded from configuration.yaml)
|
|
||||||
domain_data = hass.data.get(DOMAIN, {})
|
|
||||||
chart_metadata_config = domain_data.get(DATA_CHART_METADATA_CONFIG, {})
|
|
||||||
|
|
||||||
# Use chart_metadata_config directly (already a dict from async_setup)
|
|
||||||
service_params = dict(chart_metadata_config) if chart_metadata_config else {}
|
|
||||||
|
|
||||||
# Add required entry_id parameter
|
|
||||||
service_params["entry_id"] = config_entry.entry_id
|
|
||||||
|
|
||||||
# Force metadata to "only" - this sensor ONLY provides metadata
|
|
||||||
service_params["metadata"] = "only"
|
|
||||||
|
|
||||||
# Use user's display unit preference from config_entry
|
|
||||||
# This ensures chart_metadata yaxis values match the user's configured currency display mode
|
|
||||||
if "subunit_currency" not in service_params:
|
|
||||||
display_mode = config_entry.options.get(CONF_CURRENCY_DISPLAY_MODE, DISPLAY_MODE_SUBUNIT)
|
|
||||||
service_params["subunit_currency"] = display_mode == DISPLAY_MODE_SUBUNIT
|
|
||||||
|
|
||||||
# Call get_chartdata service using official HA service system
|
|
||||||
try:
|
|
||||||
response = await hass.services.async_call(
|
|
||||||
DOMAIN,
|
|
||||||
"get_chartdata",
|
|
||||||
service_params,
|
|
||||||
blocking=True,
|
|
||||||
return_response=True,
|
|
||||||
)
|
|
||||||
except Exception as ex:
|
|
||||||
coordinator.logger.exception("Chart metadata service call failed")
|
|
||||||
return None, str(ex)
|
|
||||||
else:
|
|
||||||
return response, None
|
|
||||||
|
|
||||||
|
|
||||||
def get_chart_metadata_state(
|
|
||||||
chart_metadata_response: dict | None,
|
|
||||||
chart_metadata_error: str | None,
|
|
||||||
) -> str | None:
|
|
||||||
"""
|
|
||||||
Return state for chart_metadata sensor.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
chart_metadata_response: Last service response (or None)
|
|
||||||
chart_metadata_error: Last error message (or None)
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
"error" if error occurred
|
|
||||||
"ready" if metadata available
|
|
||||||
"pending" if no data yet
|
|
||||||
|
|
||||||
"""
|
|
||||||
if chart_metadata_error:
|
|
||||||
return "error"
|
|
||||||
if chart_metadata_response:
|
|
||||||
return "ready"
|
|
||||||
return "pending"
|
|
||||||
|
|
||||||
|
|
||||||
def build_chart_metadata_attributes(
|
|
||||||
chart_metadata_response: dict | None,
|
|
||||||
chart_metadata_last_update: datetime | None,
|
|
||||||
chart_metadata_error: str | None,
|
|
||||||
) -> dict[str, object] | None:
|
|
||||||
"""
|
|
||||||
Return chart metadata from last service call as attributes.
|
|
||||||
|
|
||||||
Attribute order: timestamp, error (if any), metadata fields (at the end).
|
|
||||||
|
|
||||||
Args:
|
|
||||||
chart_metadata_response: Last service response (should contain "metadata" key)
|
|
||||||
chart_metadata_last_update: Timestamp of last update
|
|
||||||
chart_metadata_error: Error message if service call failed
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Dict with timestamp, optional error, and metadata fields.
|
|
||||||
|
|
||||||
"""
|
|
||||||
# Build base attributes with timestamp FIRST
|
|
||||||
attributes: dict[str, object] = {
|
|
||||||
"timestamp": chart_metadata_last_update,
|
|
||||||
}
|
|
||||||
|
|
||||||
# Add error message if service call failed
|
|
||||||
if chart_metadata_error:
|
|
||||||
attributes["error"] = chart_metadata_error
|
|
||||||
|
|
||||||
if not chart_metadata_response:
|
|
||||||
# No data - only timestamp (and error if present)
|
|
||||||
return attributes
|
|
||||||
|
|
||||||
# Extract metadata from response (get_chartdata returns {"metadata": {...}})
|
|
||||||
metadata = chart_metadata_response.get("metadata", {})
|
|
||||||
|
|
||||||
# Extract the fields we care about for charts
|
|
||||||
# These are the universal chart metadata fields useful for any chart card
|
|
||||||
if metadata:
|
|
||||||
yaxis_suggested = metadata.get("yaxis_suggested", {})
|
|
||||||
|
|
||||||
# Add yaxis bounds (useful for all chart cards)
|
|
||||||
if "min" in yaxis_suggested:
|
|
||||||
attributes["yaxis_min"] = yaxis_suggested["min"]
|
|
||||||
if "max" in yaxis_suggested:
|
|
||||||
attributes["yaxis_max"] = yaxis_suggested["max"]
|
|
||||||
|
|
||||||
# Add currency info (useful for labeling)
|
|
||||||
if "currency" in metadata:
|
|
||||||
attributes["currency"] = metadata["currency"]
|
|
||||||
|
|
||||||
# Add resolution info (interval duration in minutes)
|
|
||||||
if "resolution" in metadata:
|
|
||||||
attributes["resolution"] = metadata["resolution"]
|
|
||||||
|
|
||||||
return attributes
|
|
||||||
|
|
@ -9,18 +9,13 @@ from custom_components.tibber_prices.binary_sensor.attributes import (
|
||||||
get_price_intervals_attributes,
|
get_price_intervals_attributes,
|
||||||
)
|
)
|
||||||
from custom_components.tibber_prices.const import (
|
from custom_components.tibber_prices.const import (
|
||||||
CONF_AVERAGE_SENSOR_DISPLAY,
|
|
||||||
CONF_CURRENCY_DISPLAY_MODE,
|
|
||||||
CONF_PRICE_RATING_THRESHOLD_HIGH,
|
CONF_PRICE_RATING_THRESHOLD_HIGH,
|
||||||
CONF_PRICE_RATING_THRESHOLD_LOW,
|
CONF_PRICE_RATING_THRESHOLD_LOW,
|
||||||
DEFAULT_AVERAGE_SENSOR_DISPLAY,
|
|
||||||
DEFAULT_PRICE_RATING_THRESHOLD_HIGH,
|
DEFAULT_PRICE_RATING_THRESHOLD_HIGH,
|
||||||
DEFAULT_PRICE_RATING_THRESHOLD_LOW,
|
DEFAULT_PRICE_RATING_THRESHOLD_LOW,
|
||||||
DISPLAY_MODE_BASE,
|
|
||||||
DOMAIN,
|
DOMAIN,
|
||||||
format_price_unit_base,
|
format_price_unit_major,
|
||||||
get_display_unit_factor,
|
format_price_unit_minor,
|
||||||
get_display_unit_string,
|
|
||||||
)
|
)
|
||||||
from custom_components.tibber_prices.coordinator import (
|
from custom_components.tibber_prices.coordinator import (
|
||||||
MINUTE_UPDATE_ENTITY_KEYS,
|
MINUTE_UPDATE_ENTITY_KEYS,
|
||||||
|
|
@ -40,14 +35,14 @@ from custom_components.tibber_prices.entity_utils.icons import (
|
||||||
get_dynamic_icon,
|
get_dynamic_icon,
|
||||||
)
|
)
|
||||||
from custom_components.tibber_prices.utils.average import (
|
from custom_components.tibber_prices.utils.average import (
|
||||||
calculate_next_n_hours_mean,
|
calculate_next_n_hours_avg,
|
||||||
)
|
)
|
||||||
from custom_components.tibber_prices.utils.price import (
|
from custom_components.tibber_prices.utils.price import (
|
||||||
calculate_volatility_level,
|
calculate_volatility_level,
|
||||||
)
|
)
|
||||||
from homeassistant.components.sensor import (
|
from homeassistant.components.sensor import (
|
||||||
RestoreSensor,
|
|
||||||
SensorDeviceClass,
|
SensorDeviceClass,
|
||||||
|
SensorEntity,
|
||||||
SensorEntityDescription,
|
SensorEntityDescription,
|
||||||
)
|
)
|
||||||
from homeassistant.const import EntityCategory
|
from homeassistant.const import EntityCategory
|
||||||
|
|
@ -75,11 +70,6 @@ from .chart_data import (
|
||||||
call_chartdata_service_async,
|
call_chartdata_service_async,
|
||||||
get_chart_data_state,
|
get_chart_data_state,
|
||||||
)
|
)
|
||||||
from .chart_metadata import (
|
|
||||||
build_chart_metadata_attributes,
|
|
||||||
call_chartdata_service_for_metadata_async,
|
|
||||||
get_chart_metadata_state,
|
|
||||||
)
|
|
||||||
from .helpers import aggregate_level_data, aggregate_rating_data
|
from .helpers import aggregate_level_data, aggregate_rating_data
|
||||||
from .value_getters import get_value_getter_mapping
|
from .value_getters import get_value_getter_mapping
|
||||||
|
|
||||||
|
|
@ -97,60 +87,8 @@ MAX_FORECAST_INTERVALS = 8 # Show up to 8 future intervals (2 hours with 15-min
|
||||||
MIN_HOURS_FOR_LATER_HALF = 3 # Minimum hours needed to calculate later half average
|
MIN_HOURS_FOR_LATER_HALF = 3 # Minimum hours needed to calculate later half average
|
||||||
|
|
||||||
|
|
||||||
class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
class TibberPricesSensor(TibberPricesEntity, SensorEntity):
|
||||||
"""tibber_prices Sensor class with state restoration."""
|
"""tibber_prices Sensor class."""
|
||||||
|
|
||||||
# Base attributes excluded from recorder history (shared across all sensors)
|
|
||||||
# See: https://developers.home-assistant.io/docs/core/entity/#excluding-state-attributes-from-recorder-history
|
|
||||||
_unrecorded_attributes = frozenset(
|
|
||||||
{
|
|
||||||
"timestamp",
|
|
||||||
# Descriptions/Help Text (static, large)
|
|
||||||
"description",
|
|
||||||
"usage_tips",
|
|
||||||
# Large Nested Structures
|
|
||||||
"trend_attributes",
|
|
||||||
"current_trend_attributes",
|
|
||||||
"trend_change_attributes",
|
|
||||||
"volatility_attributes",
|
|
||||||
"data", # chart_data_export large nested data
|
|
||||||
# Frequently Changing Diagnostics
|
|
||||||
"icon_color",
|
|
||||||
"cache_age",
|
|
||||||
"cache_validity",
|
|
||||||
"data_completeness",
|
|
||||||
"data_status",
|
|
||||||
# Static/Rarely Changing
|
|
||||||
"tomorrow_expected_after",
|
|
||||||
"level_value",
|
|
||||||
"rating_value",
|
|
||||||
"level_id",
|
|
||||||
"rating_id",
|
|
||||||
"currency",
|
|
||||||
"resolution",
|
|
||||||
"yaxis_min",
|
|
||||||
"yaxis_max",
|
|
||||||
# Temporary/Time-Bound
|
|
||||||
"next_api_poll",
|
|
||||||
"next_midnight_turnover",
|
|
||||||
"last_update", # Lifecycle sensor last update timestamp
|
|
||||||
"last_turnover",
|
|
||||||
"last_error",
|
|
||||||
"error",
|
|
||||||
# Relaxation Details
|
|
||||||
"relaxation_level",
|
|
||||||
"relaxation_threshold_original_%",
|
|
||||||
"relaxation_threshold_applied_%",
|
|
||||||
# Redundant/Derived (removed from attributes, kept here for safety)
|
|
||||||
"volatility",
|
|
||||||
"diff_%",
|
|
||||||
"rating_difference_%",
|
|
||||||
"period_price_diff_from_daily_min",
|
|
||||||
"period_price_diff_from_daily_min_%",
|
|
||||||
"periods_total",
|
|
||||||
"periods_remaining",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
|
|
@ -162,8 +100,6 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
self.entity_description = entity_description
|
self.entity_description = entity_description
|
||||||
self._attr_unique_id = f"{coordinator.config_entry.entry_id}_{entity_description.key}"
|
self._attr_unique_id = f"{coordinator.config_entry.entry_id}_{entity_description.key}"
|
||||||
self._attr_has_entity_name = True
|
self._attr_has_entity_name = True
|
||||||
# Cached data for attributes (e.g., median values)
|
|
||||||
self.cached_data: dict[str, Any] = {}
|
|
||||||
# Instantiate calculators
|
# Instantiate calculators
|
||||||
self._metadata_calculator = TibberPricesMetadataCalculator(coordinator)
|
self._metadata_calculator = TibberPricesMetadataCalculator(coordinator)
|
||||||
self._volatility_calculator = TibberPricesVolatilityCalculator(coordinator)
|
self._volatility_calculator = TibberPricesVolatilityCalculator(coordinator)
|
||||||
|
|
@ -177,88 +113,15 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
self._value_getter: Callable | None = self._get_value_getter()
|
self._value_getter: Callable | None = self._get_value_getter()
|
||||||
self._time_sensitive_remove_listener: Callable | None = None
|
self._time_sensitive_remove_listener: Callable | None = None
|
||||||
self._minute_update_remove_listener: Callable | None = None
|
self._minute_update_remove_listener: Callable | None = None
|
||||||
# Lifecycle sensor state change detection (for recorder optimization)
|
|
||||||
# Store as Any because native_value can be str/float/datetime depending on sensor type
|
|
||||||
self._last_lifecycle_state: Any = None
|
|
||||||
# Chart data export (for chart_data_export sensor) - from binary_sensor
|
# Chart data export (for chart_data_export sensor) - from binary_sensor
|
||||||
self._chart_data_last_update = None # Track last service call timestamp
|
self._chart_data_last_update = None # Track last service call timestamp
|
||||||
self._chart_data_error = None # Track last service call error
|
self._chart_data_error = None # Track last service call error
|
||||||
self._chart_data_response = None # Store service response for attributes
|
self._chart_data_response = None # Store service response for attributes
|
||||||
# Chart metadata (for chart_metadata sensor)
|
|
||||||
self._chart_metadata_last_update = None # Track last service call timestamp
|
|
||||||
self._chart_metadata_error = None # Track last service call error
|
|
||||||
self._chart_metadata_response = None # Store service response for attributes
|
|
||||||
|
|
||||||
async def async_added_to_hass(self) -> None:
|
async def async_added_to_hass(self) -> None:
|
||||||
"""When entity is added to hass."""
|
"""When entity is added to hass."""
|
||||||
await super().async_added_to_hass()
|
await super().async_added_to_hass()
|
||||||
|
|
||||||
# Configure dynamic attribute exclusion for average sensors
|
|
||||||
self._configure_average_sensor_exclusions()
|
|
||||||
|
|
||||||
# Restore last state if available
|
|
||||||
await self._restore_last_state()
|
|
||||||
|
|
||||||
# Register listeners for time-sensitive updates
|
|
||||||
self._register_update_listeners()
|
|
||||||
|
|
||||||
# Trigger initial chart data loads as background tasks
|
|
||||||
self._trigger_chart_data_loads()
|
|
||||||
|
|
||||||
def _configure_average_sensor_exclusions(self) -> None:
|
|
||||||
"""Configure dynamic attribute exclusions for average sensors."""
|
|
||||||
# Dynamically exclude average attribute that matches state value
|
|
||||||
# (to avoid recording the same value twice: once as state, once as attribute)
|
|
||||||
key = self.entity_description.key
|
|
||||||
if key in (
|
|
||||||
"average_price_today",
|
|
||||||
"average_price_tomorrow",
|
|
||||||
"trailing_price_average",
|
|
||||||
"leading_price_average",
|
|
||||||
"current_hour_average_price",
|
|
||||||
"next_hour_average_price",
|
|
||||||
) or key.startswith("next_avg_"): # Future average sensors
|
|
||||||
display_mode = self.coordinator.config_entry.options.get(
|
|
||||||
CONF_AVERAGE_SENSOR_DISPLAY,
|
|
||||||
DEFAULT_AVERAGE_SENSOR_DISPLAY,
|
|
||||||
)
|
|
||||||
# Modify _state_info to add dynamic exclusion
|
|
||||||
if self._state_info is None:
|
|
||||||
self._state_info = {"unrecorded_attributes": frozenset()}
|
|
||||||
current_unrecorded = self._state_info.get("unrecorded_attributes", frozenset())
|
|
||||||
# State shows median → exclude price_median from attributes
|
|
||||||
# State shows mean → exclude price_mean from attributes
|
|
||||||
if display_mode == "median":
|
|
||||||
self._state_info["unrecorded_attributes"] = current_unrecorded | {"price_median"}
|
|
||||||
else:
|
|
||||||
self._state_info["unrecorded_attributes"] = current_unrecorded | {"price_mean"}
|
|
||||||
|
|
||||||
async def _restore_last_state(self) -> None:
|
|
||||||
"""Restore last state if available."""
|
|
||||||
if (
|
|
||||||
(last_state := await self.async_get_last_state()) is not None
|
|
||||||
and last_state.state not in (None, "unknown", "unavailable", "")
|
|
||||||
and (last_sensor_data := await self.async_get_last_sensor_data()) is not None
|
|
||||||
):
|
|
||||||
# Restore native_value from extra data (more reliable than state)
|
|
||||||
self._attr_native_value = last_sensor_data.native_value
|
|
||||||
|
|
||||||
# For chart sensors, restore response data from attributes
|
|
||||||
if self.entity_description.key == "chart_data_export":
|
|
||||||
self._chart_data_response = last_state.attributes.get("data")
|
|
||||||
self._chart_data_last_update = last_state.attributes.get("last_update")
|
|
||||||
elif self.entity_description.key == "chart_metadata":
|
|
||||||
# Restore metadata response from attributes
|
|
||||||
metadata_attrs = {}
|
|
||||||
for key in ["title", "yaxis_min", "yaxis_max", "currency", "resolution"]:
|
|
||||||
if key in last_state.attributes:
|
|
||||||
metadata_attrs[key] = last_state.attributes[key]
|
|
||||||
if metadata_attrs:
|
|
||||||
self._chart_metadata_response = metadata_attrs
|
|
||||||
self._chart_metadata_last_update = last_state.attributes.get("last_update")
|
|
||||||
|
|
||||||
def _register_update_listeners(self) -> None:
|
|
||||||
"""Register listeners for time-sensitive updates."""
|
|
||||||
# Register with coordinator for time-sensitive updates if applicable
|
# Register with coordinator for time-sensitive updates if applicable
|
||||||
if self.entity_description.key in TIME_SENSITIVE_ENTITY_KEYS:
|
if self.entity_description.key in TIME_SENSITIVE_ENTITY_KEYS:
|
||||||
self._time_sensitive_remove_listener = self.coordinator.async_add_time_sensitive_listener(
|
self._time_sensitive_remove_listener = self.coordinator.async_add_time_sensitive_listener(
|
||||||
|
|
@ -271,17 +134,9 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
self._handle_minute_update
|
self._handle_minute_update
|
||||||
)
|
)
|
||||||
|
|
||||||
def _trigger_chart_data_loads(self) -> None:
|
# For chart_data_export, trigger initial service call
|
||||||
"""Trigger initial chart data loads as background tasks."""
|
|
||||||
# For chart_data_export, trigger initial service call as background task
|
|
||||||
# (non-blocking to avoid delaying entity setup)
|
|
||||||
if self.entity_description.key == "chart_data_export":
|
if self.entity_description.key == "chart_data_export":
|
||||||
self.hass.async_create_task(self._refresh_chart_data())
|
await self._refresh_chart_data()
|
||||||
|
|
||||||
# For chart_metadata, trigger initial service call as background task
|
|
||||||
# (non-blocking to avoid delaying entity setup)
|
|
||||||
if self.entity_description.key == "chart_metadata":
|
|
||||||
self.hass.async_create_task(self._refresh_chart_metadata())
|
|
||||||
|
|
||||||
async def async_will_remove_from_hass(self) -> None:
|
async def async_will_remove_from_hass(self) -> None:
|
||||||
"""When entity will be removed from hass."""
|
"""When entity will be removed from hass."""
|
||||||
|
|
@ -315,18 +170,7 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
# Clear trend calculation cache for trend sensors
|
# Clear trend calculation cache for trend sensors
|
||||||
elif self.entity_description.key in ("current_price_trend", "next_price_trend_change"):
|
elif self.entity_description.key in ("current_price_trend", "next_price_trend_change"):
|
||||||
self._trend_calculator.clear_calculation_cache()
|
self._trend_calculator.clear_calculation_cache()
|
||||||
|
self.async_write_ha_state()
|
||||||
# For lifecycle sensor: Only write state if it actually changed (state-change filter)
|
|
||||||
# This enables precise detection at quarter-hour boundaries (23:45 turnover_pending,
|
|
||||||
# 13:00 searching_tomorrow, 00:00 turnover complete) without recorder spam
|
|
||||||
if self.entity_description.key == "data_lifecycle_status":
|
|
||||||
current_state = self.native_value
|
|
||||||
if current_state != self._last_lifecycle_state:
|
|
||||||
self._last_lifecycle_state = current_state
|
|
||||||
self.async_write_ha_state()
|
|
||||||
# If state didn't change, skip write to recorder
|
|
||||||
else:
|
|
||||||
self.async_write_ha_state()
|
|
||||||
|
|
||||||
@callback
|
@callback
|
||||||
def _handle_minute_update(self, time_service: TibberPricesTimeService) -> None:
|
def _handle_minute_update(self, time_service: TibberPricesTimeService) -> None:
|
||||||
|
|
@ -348,29 +192,13 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
# Clear cached trend values when coordinator data changes
|
# Clear cached trend values when coordinator data changes
|
||||||
if self.entity_description.key.startswith("price_trend_"):
|
if self.entity_description.key.startswith("price_trend_"):
|
||||||
self._trend_calculator.clear_trend_cache()
|
self._trend_calculator.clear_trend_cache()
|
||||||
# Also clear calculation cache (e.g., when threshold config changes)
|
|
||||||
self._trend_calculator.clear_calculation_cache()
|
|
||||||
|
|
||||||
# Refresh chart data when coordinator updates (new price data or user data)
|
# Refresh chart data when coordinator updates (new price data or user data)
|
||||||
if self.entity_description.key == "chart_data_export":
|
if self.entity_description.key == "chart_data_export":
|
||||||
# Schedule async refresh as a task (we're in a callback)
|
# Schedule async refresh as a task (we're in a callback)
|
||||||
self.hass.async_create_task(self._refresh_chart_data())
|
self.hass.async_create_task(self._refresh_chart_data())
|
||||||
|
|
||||||
# Refresh chart metadata when coordinator updates (new price data or user data)
|
super()._handle_coordinator_update()
|
||||||
if self.entity_description.key == "chart_metadata":
|
|
||||||
# Schedule async refresh as a task (we're in a callback)
|
|
||||||
self.hass.async_create_task(self._refresh_chart_metadata())
|
|
||||||
|
|
||||||
# For lifecycle sensor: Only write state if it actually changed (event-based filter)
|
|
||||||
# Prevents excessive recorder entries while keeping quarter-hour update capability
|
|
||||||
if self.entity_description.key == "data_lifecycle_status":
|
|
||||||
current_state = self.native_value
|
|
||||||
if current_state != self._last_lifecycle_state:
|
|
||||||
self._last_lifecycle_state = current_state
|
|
||||||
super()._handle_coordinator_update()
|
|
||||||
# If state didn't change, skip write to recorder
|
|
||||||
else:
|
|
||||||
super()._handle_coordinator_update()
|
|
||||||
|
|
||||||
def _get_value_getter(self) -> Callable | None:
|
def _get_value_getter(self) -> Callable | None:
|
||||||
"""Return the appropriate value getter method based on the sensor type."""
|
"""Return the appropriate value getter method based on the sensor type."""
|
||||||
|
|
@ -388,7 +216,6 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
get_next_avg_n_hours_value=self._get_next_avg_n_hours_value,
|
get_next_avg_n_hours_value=self._get_next_avg_n_hours_value,
|
||||||
get_data_timestamp=self._get_data_timestamp,
|
get_data_timestamp=self._get_data_timestamp,
|
||||||
get_chart_data_export_value=self._get_chart_data_export_value,
|
get_chart_data_export_value=self._get_chart_data_export_value,
|
||||||
get_chart_metadata_value=self._get_chart_metadata_value,
|
|
||||||
)
|
)
|
||||||
return handlers.get(self.entity_description.key)
|
return handlers.get(self.entity_description.key)
|
||||||
|
|
||||||
|
|
@ -421,7 +248,7 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Aggregated value based on type:
|
Aggregated value based on type:
|
||||||
- "price": float (average price in subunit currency units)
|
- "price": float (average price in minor currency units)
|
||||||
- "level": str (aggregated level: "very_cheap", "cheap", etc.)
|
- "level": str (aggregated level: "very_cheap", "cheap", etc.)
|
||||||
- "rating": str (aggregated rating: "low", "normal", "high")
|
- "rating": str (aggregated rating: "low", "normal", "high")
|
||||||
|
|
||||||
|
|
@ -452,15 +279,7 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
if not window_data:
|
if not window_data:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
result = self._rolling_hour_calculator.aggregate_window_data(window_data, value_type)
|
return self._rolling_hour_calculator.aggregate_window_data(window_data, value_type)
|
||||||
# For price type, aggregate_window_data returns (avg, median)
|
|
||||||
if isinstance(result, tuple):
|
|
||||||
avg, median = result
|
|
||||||
# Cache median for attributes
|
|
||||||
if median is not None:
|
|
||||||
self.cached_data[f"{self.entity_description.key}_median"] = median
|
|
||||||
return avg
|
|
||||||
return result
|
|
||||||
|
|
||||||
# ========================================================================
|
# ========================================================================
|
||||||
# INTERVAL-BASED VALUE METHODS
|
# INTERVAL-BASED VALUE METHODS
|
||||||
|
|
@ -490,7 +309,7 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
stat_func: Statistical function (min, max, or lambda for avg)
|
stat_func: Statistical function (min, max, or lambda for avg)
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Price value in subunit currency units (cents/øre), or None if unavailable
|
Price value in minor currency units (cents/øre), or None if unavailable
|
||||||
|
|
||||||
"""
|
"""
|
||||||
if not self.coordinator.data:
|
if not self.coordinator.data:
|
||||||
|
|
@ -525,8 +344,8 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
self._last_extreme_interval = pi["interval"]
|
self._last_extreme_interval = pi["interval"]
|
||||||
break
|
break
|
||||||
|
|
||||||
# Return in configured display currency units with 2 decimals
|
# Always return in minor currency units (cents/øre) with 2 decimals
|
||||||
result = get_price_value(value, config_entry=self.coordinator.config_entry)
|
result = get_price_value(value, in_euro=False)
|
||||||
return round(result, 2)
|
return round(result, 2)
|
||||||
|
|
||||||
def _get_daily_aggregated_value(
|
def _get_daily_aggregated_value(
|
||||||
|
|
@ -589,10 +408,10 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
- "leading": Next 24 hours (96 intervals after current)
|
- "leading": Next 24 hours (96 intervals after current)
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
stat_func: Function from average_utils (e.g., calculate_current_trailing_mean)
|
stat_func: Function from average_utils (e.g., calculate_current_trailing_avg)
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Price value in subunit currency units (cents/øre), or None if unavailable
|
Price value in minor currency units (cents/øre), or None if unavailable
|
||||||
|
|
||||||
"""
|
"""
|
||||||
if not self.coordinator.data:
|
if not self.coordinator.data:
|
||||||
|
|
@ -603,8 +422,8 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
if value is None:
|
if value is None:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
# Return in configured display currency units with 2 decimals
|
# Always return in minor currency units (cents/øre) with 2 decimals
|
||||||
result = get_price_value(value, config_entry=self.coordinator.config_entry)
|
result = get_price_value(value, in_euro=False)
|
||||||
return round(result, 2)
|
return round(result, 2)
|
||||||
|
|
||||||
def _translate_rating_level(self, level: str) -> str:
|
def _translate_rating_level(self, level: str) -> str:
|
||||||
|
|
@ -638,37 +457,21 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
|
|
||||||
def _get_next_avg_n_hours_value(self, hours: int) -> float | None:
|
def _get_next_avg_n_hours_value(self, hours: int) -> float | None:
|
||||||
"""
|
"""
|
||||||
Get mean price for next N hours starting from next interval.
|
Get average price for next N hours starting from next interval.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
hours: Number of hours to look ahead (1, 2, 3, 4, 5, 6, 8, 12)
|
hours: Number of hours to look ahead (1, 2, 3, 4, 5, 6, 8, 12)
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Mean or median price (based on config) in subunit currency units (e.g., cents),
|
Average price in minor currency units (e.g., cents), or None if unavailable
|
||||||
or None if unavailable
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
mean_price, median_price = calculate_next_n_hours_mean(self.coordinator.data, hours, time=self.coordinator.time)
|
avg_price = calculate_next_n_hours_avg(self.coordinator.data, hours, time=self.coordinator.time)
|
||||||
if mean_price is None:
|
if avg_price is None:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
# Get display unit factor (100 for minor, 1 for major)
|
# Convert from major to minor currency units (e.g., EUR to cents)
|
||||||
factor = get_display_unit_factor(self.coordinator.config_entry)
|
return round(avg_price * 100, 2)
|
||||||
|
|
||||||
# Get user preference for display (mean or median)
|
|
||||||
display_pref = self.coordinator.config_entry.options.get(
|
|
||||||
CONF_AVERAGE_SENSOR_DISPLAY, DEFAULT_AVERAGE_SENSOR_DISPLAY
|
|
||||||
)
|
|
||||||
|
|
||||||
# Store both values for attributes
|
|
||||||
self.cached_data[f"next_avg_{hours}h_mean"] = round(mean_price * factor, 2)
|
|
||||||
if median_price is not None:
|
|
||||||
self.cached_data[f"next_avg_{hours}h_median"] = round(median_price * factor, 2)
|
|
||||||
|
|
||||||
# Return the value chosen for state display
|
|
||||||
if display_pref == "median" and median_price is not None:
|
|
||||||
return round(median_price * factor, 2)
|
|
||||||
return round(mean_price * factor, 2) # "mean"
|
|
||||||
|
|
||||||
def _get_data_timestamp(self) -> datetime | None:
|
def _get_data_timestamp(self) -> datetime | None:
|
||||||
"""
|
"""
|
||||||
|
|
@ -728,12 +531,25 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
if not prices_to_analyze:
|
if not prices_to_analyze:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
# Calculate volatility level with custom thresholds
|
# Calculate spread and basic statistics
|
||||||
# Note: Volatility calculation (coefficient of variation) uses mean internally
|
price_min = min(prices_to_analyze)
|
||||||
|
price_max = max(prices_to_analyze)
|
||||||
|
spread = price_max - price_min
|
||||||
|
price_avg = sum(prices_to_analyze) / len(prices_to_analyze)
|
||||||
|
|
||||||
|
# Convert to minor currency units (ct/øre) for display
|
||||||
|
spread_minor = spread * 100
|
||||||
|
|
||||||
|
# Calculate volatility level with custom thresholds (pass price list, not spread)
|
||||||
volatility = calculate_volatility_level(prices_to_analyze, **thresholds)
|
volatility = calculate_volatility_level(prices_to_analyze, **thresholds)
|
||||||
|
|
||||||
# Store minimal attributes (only unique info not available in other sensors)
|
# Store attributes for this sensor
|
||||||
self._last_volatility_attributes = {
|
self._last_volatility_attributes = {
|
||||||
|
"price_spread": round(spread_minor, 2),
|
||||||
|
"price_volatility": volatility,
|
||||||
|
"price_min": round(price_min * 100, 2),
|
||||||
|
"price_max": round(price_max * 100, 2),
|
||||||
|
"price_avg": round(price_avg * 100, 2),
|
||||||
"interval_count": len(prices_to_analyze),
|
"interval_count": len(prices_to_analyze),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -860,7 +676,7 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
return True
|
return True
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def native_value(self) -> float | str | datetime | None: # noqa: PLR0912
|
def native_value(self) -> float | str | datetime | None:
|
||||||
"""Return the native value of the sensor."""
|
"""Return the native value of the sensor."""
|
||||||
try:
|
try:
|
||||||
if not self.coordinator.data or not self._value_getter:
|
if not self.coordinator.data or not self._value_getter:
|
||||||
|
|
@ -868,8 +684,7 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
# For price_level, ensure we return the translated value as state
|
# For price_level, ensure we return the translated value as state
|
||||||
if self.entity_description.key == "current_interval_price_level":
|
if self.entity_description.key == "current_interval_price_level":
|
||||||
return self._interval_calculator.get_price_level_value()
|
return self._interval_calculator.get_price_level_value()
|
||||||
|
return self._value_getter()
|
||||||
result = self._value_getter()
|
|
||||||
except (KeyError, ValueError, TypeError) as ex:
|
except (KeyError, ValueError, TypeError) as ex:
|
||||||
self.coordinator.logger.exception(
|
self.coordinator.logger.exception(
|
||||||
"Error getting sensor value",
|
"Error getting sensor value",
|
||||||
|
|
@ -879,48 +694,6 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
return None
|
return None
|
||||||
else:
|
|
||||||
# Handle tuple results (average + median) from calculators
|
|
||||||
if isinstance(result, tuple):
|
|
||||||
avg, median = result
|
|
||||||
# Get user preference for state display
|
|
||||||
display_pref = self.coordinator.config_entry.options.get(
|
|
||||||
CONF_AVERAGE_SENSOR_DISPLAY,
|
|
||||||
DEFAULT_AVERAGE_SENSOR_DISPLAY,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Cache BOTH values for attribute builders to use
|
|
||||||
key = self.entity_description.key
|
|
||||||
if "average_price_today" in key:
|
|
||||||
self.cached_data["average_price_today_mean"] = avg
|
|
||||||
self.cached_data["average_price_today_median"] = median
|
|
||||||
elif "average_price_tomorrow" in key:
|
|
||||||
self.cached_data["average_price_tomorrow_mean"] = avg
|
|
||||||
self.cached_data["average_price_tomorrow_median"] = median
|
|
||||||
elif "trailing_price_average" in key:
|
|
||||||
self.cached_data["trailing_price_mean"] = avg
|
|
||||||
self.cached_data["trailing_price_median"] = median
|
|
||||||
elif "leading_price_average" in key:
|
|
||||||
self.cached_data["leading_price_mean"] = avg
|
|
||||||
self.cached_data["leading_price_median"] = median
|
|
||||||
elif "current_hour_average_price" in key:
|
|
||||||
self.cached_data["rolling_hour_0_mean"] = avg
|
|
||||||
self.cached_data["rolling_hour_0_median"] = median
|
|
||||||
elif "next_hour_average_price" in key:
|
|
||||||
self.cached_data["rolling_hour_1_mean"] = avg
|
|
||||||
self.cached_data["rolling_hour_1_median"] = median
|
|
||||||
elif key.startswith("next_avg_"):
|
|
||||||
# Extract hours from key (e.g., "next_avg_3h" -> "3")
|
|
||||||
hours = key.split("_")[-1].replace("h", "")
|
|
||||||
self.cached_data[f"next_avg_{hours}h_mean"] = avg
|
|
||||||
self.cached_data[f"next_avg_{hours}h_median"] = median
|
|
||||||
|
|
||||||
# Return the value chosen for state display
|
|
||||||
if display_pref == "median":
|
|
||||||
return median
|
|
||||||
return avg # "mean"
|
|
||||||
|
|
||||||
return result
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def native_unit_of_measurement(self) -> str | None:
|
def native_unit_of_measurement(self) -> str | None:
|
||||||
|
|
@ -931,13 +704,12 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
if self.coordinator.data:
|
if self.coordinator.data:
|
||||||
currency = self.coordinator.data.get("currency")
|
currency = self.coordinator.data.get("currency")
|
||||||
|
|
||||||
# Special case: Energy Dashboard sensor always uses base currency
|
# Use major currency unit for Energy Dashboard sensor
|
||||||
# regardless of user display mode configuration
|
if self.entity_description.key == "current_interval_price_major":
|
||||||
if self.entity_description.key == "current_interval_price_base":
|
return format_price_unit_major(currency)
|
||||||
return format_price_unit_base(currency)
|
|
||||||
|
|
||||||
# Get unit based on user configuration (major or minor)
|
# Use minor currency unit for all other price sensors
|
||||||
return get_display_unit_string(self.coordinator.config_entry, currency)
|
return format_price_unit_minor(currency)
|
||||||
|
|
||||||
# For all other sensors, use unit from entity description
|
# For all other sensors, use unit from entity description
|
||||||
return self.entity_description.native_unit_of_measurement
|
return self.entity_description.native_unit_of_measurement
|
||||||
|
|
@ -946,12 +718,7 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
"""Check if the current time is within a best price period."""
|
"""Check if the current time is within a best price period."""
|
||||||
if not self.coordinator.data:
|
if not self.coordinator.data:
|
||||||
return False
|
return False
|
||||||
attrs = get_price_intervals_attributes(
|
attrs = get_price_intervals_attributes(self.coordinator.data, reverse_sort=False, time=self.coordinator.time)
|
||||||
self.coordinator.data,
|
|
||||||
reverse_sort=False,
|
|
||||||
time=self.coordinator.time,
|
|
||||||
config_entry=self.coordinator.config_entry,
|
|
||||||
)
|
|
||||||
if not attrs:
|
if not attrs:
|
||||||
return False
|
return False
|
||||||
start = attrs.get("start")
|
start = attrs.get("start")
|
||||||
|
|
@ -966,12 +733,7 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
"""Check if the current time is within a peak price period."""
|
"""Check if the current time is within a peak price period."""
|
||||||
if not self.coordinator.data:
|
if not self.coordinator.data:
|
||||||
return False
|
return False
|
||||||
attrs = get_price_intervals_attributes(
|
attrs = get_price_intervals_attributes(self.coordinator.data, reverse_sort=True, time=self.coordinator.time)
|
||||||
self.coordinator.data,
|
|
||||||
reverse_sort=True,
|
|
||||||
time=self.coordinator.time,
|
|
||||||
config_entry=self.coordinator.config_entry,
|
|
||||||
)
|
|
||||||
if not attrs:
|
if not attrs:
|
||||||
return False
|
return False
|
||||||
start = attrs.get("start")
|
start = attrs.get("start")
|
||||||
|
|
@ -987,13 +749,11 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
key = self.entity_description.key
|
key = self.entity_description.key
|
||||||
value = self.native_value
|
value = self.native_value
|
||||||
|
|
||||||
# Icon mapping for trend directions (5-level scale)
|
# Icon mapping for trend directions
|
||||||
trend_icons = {
|
trend_icons = {
|
||||||
"strongly_rising": "mdi:chevron-double-up",
|
|
||||||
"rising": "mdi:trending-up",
|
"rising": "mdi:trending-up",
|
||||||
"stable": "mdi:trending-neutral",
|
|
||||||
"falling": "mdi:trending-down",
|
"falling": "mdi:trending-down",
|
||||||
"strongly_falling": "mdi:chevron-double-down",
|
"stable": "mdi:trending-neutral",
|
||||||
}
|
}
|
||||||
|
|
||||||
# Special handling for next_price_trend_change: Icon based on direction attribute
|
# Special handling for next_price_trend_change: Icon based on direction attribute
|
||||||
|
|
@ -1030,43 +790,6 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
# Fall back to static icon from entity description
|
# Fall back to static icon from entity description
|
||||||
return icon or self.entity_description.icon
|
return icon or self.entity_description.icon
|
||||||
|
|
||||||
@property
|
|
||||||
def suggested_display_precision(self) -> int | None:
|
|
||||||
"""
|
|
||||||
Return suggested display precision based on currency display mode.
|
|
||||||
|
|
||||||
For MONETARY sensors:
|
|
||||||
- Current/Next Interval Price: Show exact price with higher precision
|
|
||||||
- Base currency (€/kr): 4 decimals (e.g., 0.1234 €)
|
|
||||||
- Subunit currency (ct/øre): 2 decimals (e.g., 12.34 ct)
|
|
||||||
- All other price sensors:
|
|
||||||
- Base currency (€/kr): 2 decimals (e.g., 0.12 €)
|
|
||||||
- Subunit currency (ct/øre): 1 decimal (e.g., 12.5 ct)
|
|
||||||
|
|
||||||
For non-MONETARY sensors, use static value from entity description.
|
|
||||||
"""
|
|
||||||
# Only apply dynamic precision to MONETARY sensors
|
|
||||||
if self.entity_description.device_class != SensorDeviceClass.MONETARY:
|
|
||||||
return self.entity_description.suggested_display_precision
|
|
||||||
|
|
||||||
# Check display mode configuration
|
|
||||||
display_mode = self.coordinator.config_entry.options.get(CONF_CURRENCY_DISPLAY_MODE, DISPLAY_MODE_BASE)
|
|
||||||
|
|
||||||
# Special case: Energy Dashboard sensor always shows base currency with 4 decimals
|
|
||||||
# regardless of display mode (it's always in base currency by design)
|
|
||||||
if self.entity_description.key == "current_interval_price_base":
|
|
||||||
return 4
|
|
||||||
|
|
||||||
# Special case: Current and Next interval price sensors get higher precision
|
|
||||||
# to show exact prices as received from API
|
|
||||||
if self.entity_description.key in ("current_interval_price", "next_interval_price"):
|
|
||||||
# Major: 4 decimals (0.1234 €), Minor: 2 decimals (12.34 ct)
|
|
||||||
return 4 if display_mode == DISPLAY_MODE_BASE else 2
|
|
||||||
|
|
||||||
# All other sensors: Standard precision
|
|
||||||
# Major: 2 decimals (0.12 €), Minor: 1 decimal (12.5 ct)
|
|
||||||
return 2 if display_mode == DISPLAY_MODE_BASE else 1
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def extra_state_attributes(self) -> dict[str, Any] | None:
|
def extra_state_attributes(self) -> dict[str, Any] | None:
|
||||||
"""Return additional state attributes."""
|
"""Return additional state attributes."""
|
||||||
|
|
@ -1108,30 +831,20 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
if key == "chart_data_export":
|
if key == "chart_data_export":
|
||||||
return self._get_chart_data_export_attributes()
|
return self._get_chart_data_export_attributes()
|
||||||
|
|
||||||
# Special handling for chart_metadata - returns metadata in attributes
|
|
||||||
if key == "chart_metadata":
|
|
||||||
return self._get_chart_metadata_attributes()
|
|
||||||
|
|
||||||
# Prepare cached data that attribute builders might need
|
# Prepare cached data that attribute builders might need
|
||||||
# Start with all mean/median values from self.cached_data
|
cached_data = {
|
||||||
cached_data = {k: v for k, v in self.cached_data.items() if "_mean" in k or "_median" in k}
|
"trend_attributes": self._trend_calculator.get_trend_attributes(),
|
||||||
|
"current_trend_attributes": self._trend_calculator.get_current_trend_attributes(),
|
||||||
# Add special calculator results
|
"trend_change_attributes": self._trend_calculator.get_trend_change_attributes(),
|
||||||
cached_data.update(
|
"volatility_attributes": self._volatility_calculator.get_volatility_attributes(),
|
||||||
{
|
"last_extreme_interval": self._daily_stat_calculator.get_last_extreme_interval(),
|
||||||
"trend_attributes": self._trend_calculator.get_trend_attributes(),
|
"last_price_level": self._interval_calculator.get_last_price_level(),
|
||||||
"current_trend_attributes": self._trend_calculator.get_current_trend_attributes(),
|
"last_rating_difference": self._interval_calculator.get_last_rating_difference(),
|
||||||
"trend_change_attributes": self._trend_calculator.get_trend_change_attributes(),
|
"last_rating_level": self._interval_calculator.get_last_rating_level(),
|
||||||
"volatility_attributes": self._volatility_calculator.get_volatility_attributes(),
|
"data_timestamp": getattr(self, "_data_timestamp", None),
|
||||||
"last_extreme_interval": self._daily_stat_calculator.get_last_extreme_interval(),
|
"rolling_hour_level": self._get_rolling_hour_level_for_cached_data(key),
|
||||||
"last_price_level": self._interval_calculator.get_last_price_level(),
|
"lifecycle_calculator": self._lifecycle_calculator, # For lifecycle sensor attributes
|
||||||
"last_rating_difference": self._interval_calculator.get_last_rating_difference(),
|
}
|
||||||
"last_rating_level": self._interval_calculator.get_last_rating_level(),
|
|
||||||
"data_timestamp": getattr(self, "_data_timestamp", None),
|
|
||||||
"rolling_hour_level": self._get_rolling_hour_level_for_cached_data(key),
|
|
||||||
"lifecycle_calculator": self._lifecycle_calculator, # For lifecycle sensor attributes
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
# Use the centralized attribute builder
|
# Use the centralized attribute builder
|
||||||
return build_sensor_attributes(
|
return build_sensor_attributes(
|
||||||
|
|
@ -1139,7 +852,6 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
coordinator=self.coordinator,
|
coordinator=self.coordinator,
|
||||||
native_value=self.native_value,
|
native_value=self.native_value,
|
||||||
cached_data=cached_data,
|
cached_data=cached_data,
|
||||||
config_entry=self.coordinator.config_entry,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
def _get_rolling_hour_level_for_cached_data(self, key: str) -> str | None:
|
def _get_rolling_hour_level_for_cached_data(self, key: str) -> str | None:
|
||||||
|
|
@ -1194,36 +906,3 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
||||||
chart_data_last_update=self._chart_data_last_update,
|
chart_data_last_update=self._chart_data_last_update,
|
||||||
chart_data_error=self._chart_data_error,
|
chart_data_error=self._chart_data_error,
|
||||||
)
|
)
|
||||||
|
|
||||||
def _get_chart_metadata_value(self) -> str | None:
|
|
||||||
"""Return state for chart_metadata sensor."""
|
|
||||||
return get_chart_metadata_state(
|
|
||||||
chart_metadata_response=self._chart_metadata_response,
|
|
||||||
chart_metadata_error=self._chart_metadata_error,
|
|
||||||
)
|
|
||||||
|
|
||||||
async def _refresh_chart_metadata(self) -> None:
|
|
||||||
"""Refresh chart metadata by calling get_chartdata service with metadata=only."""
|
|
||||||
response, error = await call_chartdata_service_for_metadata_async(
|
|
||||||
hass=self.hass,
|
|
||||||
coordinator=self.coordinator,
|
|
||||||
config_entry=self.coordinator.config_entry,
|
|
||||||
)
|
|
||||||
self._chart_metadata_response = response
|
|
||||||
time = self.coordinator.time
|
|
||||||
self._chart_metadata_last_update = time.now()
|
|
||||||
self._chart_metadata_error = error
|
|
||||||
# Trigger state update after refresh
|
|
||||||
self.async_write_ha_state()
|
|
||||||
|
|
||||||
def _get_chart_metadata_attributes(self) -> dict[str, object] | None:
|
|
||||||
"""
|
|
||||||
Return chart metadata from last service call as attributes.
|
|
||||||
|
|
||||||
Delegates to chart_metadata module for attribute building.
|
|
||||||
"""
|
|
||||||
return build_chart_metadata_attributes(
|
|
||||||
chart_metadata_response=self._chart_metadata_response,
|
|
||||||
chart_metadata_last_update=self._chart_metadata_last_update,
|
|
||||||
chart_metadata_error=self._chart_metadata_error,
|
|
||||||
)
|
|
||||||
|
|
|
||||||
|
|
@ -68,13 +68,13 @@ INTERVAL_PRICE_SENSORS = (
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=2,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
key="current_interval_price_base",
|
key="current_interval_price_major",
|
||||||
translation_key="current_interval_price_base",
|
translation_key="current_interval_price_major",
|
||||||
name="Current Electricity Price (Energy Dashboard)",
|
name="Current Electricity Price (Energy Dashboard)",
|
||||||
icon="mdi:cash", # Dynamic: shows cash-multiple/plus/cash/minus/remove based on price level
|
icon="mdi:cash", # Dynamic: shows cash-multiple/plus/cash/minus/remove based on price level
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None for Energy Dashboard
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None for Energy Dashboard
|
||||||
suggested_display_precision=4, # More precision for base currency (e.g., 0.2534 EUR/kWh)
|
suggested_display_precision=4, # More precision for major currency (e.g., 0.2534 EUR/kWh)
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
key="next_interval_price",
|
key="next_interval_price",
|
||||||
|
|
@ -181,7 +181,7 @@ ROLLING_HOUR_PRICE_SENSORS = (
|
||||||
icon="mdi:cash", # Dynamic: shows cash-multiple/plus/cash/minus/remove based on aggregated price level
|
icon="mdi:cash", # Dynamic: shows cash-multiple/plus/cash/minus/remove based on aggregated price level
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
key="next_hour_average_price",
|
key="next_hour_average_price",
|
||||||
|
|
@ -190,7 +190,7 @@ ROLLING_HOUR_PRICE_SENSORS = (
|
||||||
icon="mdi:cash-fast", # Dynamic: shows cash-multiple/plus/cash/minus/remove based on aggregated price level
|
icon="mdi:cash-fast", # Dynamic: shows cash-multiple/plus/cash/minus/remove based on aggregated price level
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -259,7 +259,7 @@ DAILY_STAT_SENSORS = (
|
||||||
icon="mdi:arrow-collapse-down",
|
icon="mdi:arrow-collapse-down",
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
key="highest_price_today",
|
key="highest_price_today",
|
||||||
|
|
@ -268,7 +268,7 @@ DAILY_STAT_SENSORS = (
|
||||||
icon="mdi:arrow-collapse-up",
|
icon="mdi:arrow-collapse-up",
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
key="average_price_today",
|
key="average_price_today",
|
||||||
|
|
@ -277,7 +277,7 @@ DAILY_STAT_SENSORS = (
|
||||||
icon="mdi:chart-line",
|
icon="mdi:chart-line",
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
key="lowest_price_tomorrow",
|
key="lowest_price_tomorrow",
|
||||||
|
|
@ -286,7 +286,7 @@ DAILY_STAT_SENSORS = (
|
||||||
icon="mdi:arrow-collapse-down",
|
icon="mdi:arrow-collapse-down",
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
key="highest_price_tomorrow",
|
key="highest_price_tomorrow",
|
||||||
|
|
@ -295,7 +295,7 @@ DAILY_STAT_SENSORS = (
|
||||||
icon="mdi:arrow-collapse-up",
|
icon="mdi:arrow-collapse-up",
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
key="average_price_tomorrow",
|
key="average_price_tomorrow",
|
||||||
|
|
@ -304,7 +304,7 @@ DAILY_STAT_SENSORS = (
|
||||||
icon="mdi:chart-line",
|
icon="mdi:chart-line",
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -395,7 +395,7 @@ WINDOW_24H_SENSORS = (
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
entity_registry_enabled_default=False,
|
entity_registry_enabled_default=False,
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
key="leading_price_average",
|
key="leading_price_average",
|
||||||
|
|
@ -405,7 +405,7 @@ WINDOW_24H_SENSORS = (
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
entity_registry_enabled_default=False, # Advanced use case
|
entity_registry_enabled_default=False, # Advanced use case
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
key="trailing_price_min",
|
key="trailing_price_min",
|
||||||
|
|
@ -415,7 +415,7 @@ WINDOW_24H_SENSORS = (
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
entity_registry_enabled_default=False,
|
entity_registry_enabled_default=False,
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
key="trailing_price_max",
|
key="trailing_price_max",
|
||||||
|
|
@ -425,7 +425,7 @@ WINDOW_24H_SENSORS = (
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
entity_registry_enabled_default=False,
|
entity_registry_enabled_default=False,
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
key="leading_price_min",
|
key="leading_price_min",
|
||||||
|
|
@ -435,7 +435,7 @@ WINDOW_24H_SENSORS = (
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
entity_registry_enabled_default=False, # Advanced use case
|
entity_registry_enabled_default=False, # Advanced use case
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
key="leading_price_max",
|
key="leading_price_max",
|
||||||
|
|
@ -445,7 +445,7 @@ WINDOW_24H_SENSORS = (
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
entity_registry_enabled_default=False, # Advanced use case
|
entity_registry_enabled_default=False, # Advanced use case
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -454,7 +454,7 @@ WINDOW_24H_SENSORS = (
|
||||||
# ----------------------------------------------------------------------------
|
# ----------------------------------------------------------------------------
|
||||||
# Calculate averages and trends for upcoming time windows
|
# Calculate averages and trends for upcoming time windows
|
||||||
|
|
||||||
FUTURE_MEAN_SENSORS = (
|
FUTURE_AVG_SENSORS = (
|
||||||
# Default enabled: 1h-5h
|
# Default enabled: 1h-5h
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
key="next_avg_1h",
|
key="next_avg_1h",
|
||||||
|
|
@ -463,7 +463,7 @@ FUTURE_MEAN_SENSORS = (
|
||||||
icon="mdi:chart-line",
|
icon="mdi:chart-line",
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
entity_registry_enabled_default=True,
|
entity_registry_enabled_default=True,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -473,7 +473,7 @@ FUTURE_MEAN_SENSORS = (
|
||||||
icon="mdi:chart-line",
|
icon="mdi:chart-line",
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
entity_registry_enabled_default=True,
|
entity_registry_enabled_default=True,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -483,7 +483,7 @@ FUTURE_MEAN_SENSORS = (
|
||||||
icon="mdi:chart-line",
|
icon="mdi:chart-line",
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
entity_registry_enabled_default=True,
|
entity_registry_enabled_default=True,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -493,7 +493,7 @@ FUTURE_MEAN_SENSORS = (
|
||||||
icon="mdi:chart-line",
|
icon="mdi:chart-line",
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
entity_registry_enabled_default=True,
|
entity_registry_enabled_default=True,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -503,7 +503,7 @@ FUTURE_MEAN_SENSORS = (
|
||||||
icon="mdi:chart-line",
|
icon="mdi:chart-line",
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
entity_registry_enabled_default=True,
|
entity_registry_enabled_default=True,
|
||||||
),
|
),
|
||||||
# Disabled by default: 6h, 8h, 12h (advanced use cases)
|
# Disabled by default: 6h, 8h, 12h (advanced use cases)
|
||||||
|
|
@ -514,7 +514,7 @@ FUTURE_MEAN_SENSORS = (
|
||||||
icon="mdi:chart-line",
|
icon="mdi:chart-line",
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
entity_registry_enabled_default=False,
|
entity_registry_enabled_default=False,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -524,7 +524,7 @@ FUTURE_MEAN_SENSORS = (
|
||||||
icon="mdi:chart-line",
|
icon="mdi:chart-line",
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
entity_registry_enabled_default=False,
|
entity_registry_enabled_default=False,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -534,7 +534,7 @@ FUTURE_MEAN_SENSORS = (
|
||||||
icon="mdi:chart-line",
|
icon="mdi:chart-line",
|
||||||
device_class=SensorDeviceClass.MONETARY,
|
device_class=SensorDeviceClass.MONETARY,
|
||||||
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
state_class=SensorStateClass.TOTAL, # MONETARY requires TOTAL or None
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=1,
|
||||||
entity_registry_enabled_default=False,
|
entity_registry_enabled_default=False,
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
|
@ -548,7 +548,7 @@ FUTURE_TREND_SENSORS = (
|
||||||
icon="mdi:trending-up", # Dynamic: trending-up/trending-down/trending-neutral based on current trend
|
icon="mdi:trending-up", # Dynamic: trending-up/trending-down/trending-neutral based on current trend
|
||||||
device_class=SensorDeviceClass.ENUM,
|
device_class=SensorDeviceClass.ENUM,
|
||||||
state_class=None, # Enum values: no statistics
|
state_class=None, # Enum values: no statistics
|
||||||
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
options=["rising", "falling", "stable"],
|
||||||
entity_registry_enabled_default=True,
|
entity_registry_enabled_default=True,
|
||||||
),
|
),
|
||||||
# Next trend change sensor (when will trend change?)
|
# Next trend change sensor (when will trend change?)
|
||||||
|
|
@ -570,7 +570,7 @@ FUTURE_TREND_SENSORS = (
|
||||||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||||
device_class=SensorDeviceClass.ENUM,
|
device_class=SensorDeviceClass.ENUM,
|
||||||
state_class=None, # Enum values: no statistics
|
state_class=None, # Enum values: no statistics
|
||||||
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
options=["rising", "falling", "stable"],
|
||||||
entity_registry_enabled_default=True,
|
entity_registry_enabled_default=True,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -580,7 +580,7 @@ FUTURE_TREND_SENSORS = (
|
||||||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||||
device_class=SensorDeviceClass.ENUM,
|
device_class=SensorDeviceClass.ENUM,
|
||||||
state_class=None, # Enum values: no statistics
|
state_class=None, # Enum values: no statistics
|
||||||
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
options=["rising", "falling", "stable"],
|
||||||
entity_registry_enabled_default=True,
|
entity_registry_enabled_default=True,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -590,7 +590,7 @@ FUTURE_TREND_SENSORS = (
|
||||||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||||
device_class=SensorDeviceClass.ENUM,
|
device_class=SensorDeviceClass.ENUM,
|
||||||
state_class=None, # Enum values: no statistics
|
state_class=None, # Enum values: no statistics
|
||||||
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
options=["rising", "falling", "stable"],
|
||||||
entity_registry_enabled_default=True,
|
entity_registry_enabled_default=True,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -600,7 +600,7 @@ FUTURE_TREND_SENSORS = (
|
||||||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||||
device_class=SensorDeviceClass.ENUM,
|
device_class=SensorDeviceClass.ENUM,
|
||||||
state_class=None, # Enum values: no statistics
|
state_class=None, # Enum values: no statistics
|
||||||
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
options=["rising", "falling", "stable"],
|
||||||
entity_registry_enabled_default=True,
|
entity_registry_enabled_default=True,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -610,7 +610,7 @@ FUTURE_TREND_SENSORS = (
|
||||||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||||
device_class=SensorDeviceClass.ENUM,
|
device_class=SensorDeviceClass.ENUM,
|
||||||
state_class=None, # Enum values: no statistics
|
state_class=None, # Enum values: no statistics
|
||||||
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
options=["rising", "falling", "stable"],
|
||||||
entity_registry_enabled_default=True,
|
entity_registry_enabled_default=True,
|
||||||
),
|
),
|
||||||
# Disabled by default: 6h, 8h, 12h
|
# Disabled by default: 6h, 8h, 12h
|
||||||
|
|
@ -621,7 +621,7 @@ FUTURE_TREND_SENSORS = (
|
||||||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||||
device_class=SensorDeviceClass.ENUM,
|
device_class=SensorDeviceClass.ENUM,
|
||||||
state_class=None, # Enum values: no statistics
|
state_class=None, # Enum values: no statistics
|
||||||
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
options=["rising", "falling", "stable"],
|
||||||
entity_registry_enabled_default=False,
|
entity_registry_enabled_default=False,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -631,7 +631,7 @@ FUTURE_TREND_SENSORS = (
|
||||||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||||
device_class=SensorDeviceClass.ENUM,
|
device_class=SensorDeviceClass.ENUM,
|
||||||
state_class=None, # Enum values: no statistics
|
state_class=None, # Enum values: no statistics
|
||||||
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
options=["rising", "falling", "stable"],
|
||||||
entity_registry_enabled_default=False,
|
entity_registry_enabled_default=False,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -641,7 +641,7 @@ FUTURE_TREND_SENSORS = (
|
||||||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||||
device_class=SensorDeviceClass.ENUM,
|
device_class=SensorDeviceClass.ENUM,
|
||||||
state_class=None, # Enum values: no statistics
|
state_class=None, # Enum values: no statistics
|
||||||
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
options=["rising", "falling", "stable"],
|
||||||
entity_registry_enabled_default=False,
|
entity_registry_enabled_default=False,
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
|
@ -731,9 +731,9 @@ BEST_PRICE_TIMING_SENSORS = (
|
||||||
name="Best Price Period Duration",
|
name="Best Price Period Duration",
|
||||||
icon="mdi:timer",
|
icon="mdi:timer",
|
||||||
device_class=SensorDeviceClass.DURATION,
|
device_class=SensorDeviceClass.DURATION,
|
||||||
native_unit_of_measurement=UnitOfTime.HOURS,
|
native_unit_of_measurement=UnitOfTime.MINUTES,
|
||||||
state_class=None, # Duration not needed in long-term statistics
|
state_class=None, # Changes with each period: no statistics
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=0,
|
||||||
entity_registry_enabled_default=False,
|
entity_registry_enabled_default=False,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -741,10 +741,9 @@ BEST_PRICE_TIMING_SENSORS = (
|
||||||
translation_key="best_price_remaining_minutes",
|
translation_key="best_price_remaining_minutes",
|
||||||
name="Best Price Remaining Time",
|
name="Best Price Remaining Time",
|
||||||
icon="mdi:timer-sand",
|
icon="mdi:timer-sand",
|
||||||
device_class=SensorDeviceClass.DURATION,
|
native_unit_of_measurement=UnitOfTime.MINUTES,
|
||||||
native_unit_of_measurement=UnitOfTime.HOURS,
|
state_class=None, # Countdown timer: no statistics
|
||||||
state_class=None, # Countdown timers excluded from statistics
|
suggested_display_precision=0,
|
||||||
suggested_display_precision=2,
|
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
key="best_price_progress",
|
key="best_price_progress",
|
||||||
|
|
@ -768,10 +767,9 @@ BEST_PRICE_TIMING_SENSORS = (
|
||||||
translation_key="best_price_next_in_minutes",
|
translation_key="best_price_next_in_minutes",
|
||||||
name="Best Price Starts In",
|
name="Best Price Starts In",
|
||||||
icon="mdi:timer-outline",
|
icon="mdi:timer-outline",
|
||||||
device_class=SensorDeviceClass.DURATION,
|
native_unit_of_measurement=UnitOfTime.MINUTES,
|
||||||
native_unit_of_measurement=UnitOfTime.HOURS,
|
state_class=None, # Countdown timer: no statistics
|
||||||
state_class=None, # Next-start timers excluded from statistics
|
suggested_display_precision=0,
|
||||||
suggested_display_precision=2,
|
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -790,9 +788,9 @@ PEAK_PRICE_TIMING_SENSORS = (
|
||||||
name="Peak Price Period Duration",
|
name="Peak Price Period Duration",
|
||||||
icon="mdi:timer",
|
icon="mdi:timer",
|
||||||
device_class=SensorDeviceClass.DURATION,
|
device_class=SensorDeviceClass.DURATION,
|
||||||
native_unit_of_measurement=UnitOfTime.HOURS,
|
native_unit_of_measurement=UnitOfTime.MINUTES,
|
||||||
state_class=None, # Duration not needed in long-term statistics
|
state_class=None, # Changes with each period: no statistics
|
||||||
suggested_display_precision=2,
|
suggested_display_precision=0,
|
||||||
entity_registry_enabled_default=False,
|
entity_registry_enabled_default=False,
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -800,10 +798,9 @@ PEAK_PRICE_TIMING_SENSORS = (
|
||||||
translation_key="peak_price_remaining_minutes",
|
translation_key="peak_price_remaining_minutes",
|
||||||
name="Peak Price Remaining Time",
|
name="Peak Price Remaining Time",
|
||||||
icon="mdi:timer-sand",
|
icon="mdi:timer-sand",
|
||||||
device_class=SensorDeviceClass.DURATION,
|
native_unit_of_measurement=UnitOfTime.MINUTES,
|
||||||
native_unit_of_measurement=UnitOfTime.HOURS,
|
state_class=None, # Countdown timer: no statistics
|
||||||
state_class=None, # Countdown timers excluded from statistics
|
suggested_display_precision=0,
|
||||||
suggested_display_precision=2,
|
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
key="peak_price_progress",
|
key="peak_price_progress",
|
||||||
|
|
@ -827,10 +824,9 @@ PEAK_PRICE_TIMING_SENSORS = (
|
||||||
translation_key="peak_price_next_in_minutes",
|
translation_key="peak_price_next_in_minutes",
|
||||||
name="Peak Price Starts In",
|
name="Peak Price Starts In",
|
||||||
icon="mdi:timer-outline",
|
icon="mdi:timer-outline",
|
||||||
device_class=SensorDeviceClass.DURATION,
|
native_unit_of_measurement=UnitOfTime.MINUTES,
|
||||||
native_unit_of_measurement=UnitOfTime.HOURS,
|
state_class=None, # Countdown timer: no statistics
|
||||||
state_class=None, # Next-start timers excluded from statistics
|
suggested_display_precision=0,
|
||||||
suggested_display_precision=2,
|
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -847,7 +843,6 @@ DIAGNOSTIC_SENSORS = (
|
||||||
options=["cached", "fresh", "refreshing", "searching_tomorrow", "turnover_pending", "error"],
|
options=["cached", "fresh", "refreshing", "searching_tomorrow", "turnover_pending", "error"],
|
||||||
state_class=None, # Status value: no statistics
|
state_class=None, # Status value: no statistics
|
||||||
entity_category=EntityCategory.DIAGNOSTIC,
|
entity_category=EntityCategory.DIAGNOSTIC,
|
||||||
entity_registry_enabled_default=True, # Critical for debugging
|
|
||||||
),
|
),
|
||||||
# Home metadata from user data
|
# Home metadata from user data
|
||||||
SensorEntityDescription(
|
SensorEntityDescription(
|
||||||
|
|
@ -1008,16 +1003,6 @@ DIAGNOSTIC_SENSORS = (
|
||||||
entity_category=EntityCategory.DIAGNOSTIC,
|
entity_category=EntityCategory.DIAGNOSTIC,
|
||||||
entity_registry_enabled_default=False, # Opt-in
|
entity_registry_enabled_default=False, # Opt-in
|
||||||
),
|
),
|
||||||
SensorEntityDescription(
|
|
||||||
key="chart_metadata",
|
|
||||||
translation_key="chart_metadata",
|
|
||||||
name="Chart Metadata",
|
|
||||||
icon="mdi:chart-box-outline",
|
|
||||||
device_class=SensorDeviceClass.ENUM,
|
|
||||||
options=["pending", "ready", "error"],
|
|
||||||
entity_category=EntityCategory.DIAGNOSTIC,
|
|
||||||
entity_registry_enabled_default=True, # Critical for chart features
|
|
||||||
),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
# ----------------------------------------------------------------------------
|
# ----------------------------------------------------------------------------
|
||||||
|
|
@ -1035,7 +1020,7 @@ ENTITY_DESCRIPTIONS = (
|
||||||
*DAILY_LEVEL_SENSORS,
|
*DAILY_LEVEL_SENSORS,
|
||||||
*DAILY_RATING_SENSORS,
|
*DAILY_RATING_SENSORS,
|
||||||
*WINDOW_24H_SENSORS,
|
*WINDOW_24H_SENSORS,
|
||||||
*FUTURE_MEAN_SENSORS,
|
*FUTURE_AVG_SENSORS,
|
||||||
*FUTURE_TREND_SENSORS,
|
*FUTURE_TREND_SENSORS,
|
||||||
*VOLATILITY_SENSORS,
|
*VOLATILITY_SENSORS,
|
||||||
*BEST_PRICE_TIMING_SENSORS,
|
*BEST_PRICE_TIMING_SENSORS,
|
||||||
|
|
|
||||||
|
|
@ -23,12 +23,9 @@ from typing import TYPE_CHECKING
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
||||||
from homeassistant.config_entries import ConfigEntry
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import get_display_unit_factor
|
|
||||||
from custom_components.tibber_prices.coordinator.helpers import get_intervals_for_day_offsets
|
from custom_components.tibber_prices.coordinator.helpers import get_intervals_for_day_offsets
|
||||||
from custom_components.tibber_prices.entity_utils.helpers import get_price_value
|
from custom_components.tibber_prices.entity_utils.helpers import get_price_value
|
||||||
from custom_components.tibber_prices.utils.average import calculate_mean, calculate_median
|
|
||||||
from custom_components.tibber_prices.utils.price import (
|
from custom_components.tibber_prices.utils.price import (
|
||||||
aggregate_price_levels,
|
aggregate_price_levels,
|
||||||
aggregate_price_rating,
|
aggregate_price_rating,
|
||||||
|
|
@ -38,31 +35,22 @@ if TYPE_CHECKING:
|
||||||
from collections.abc import Callable
|
from collections.abc import Callable
|
||||||
|
|
||||||
|
|
||||||
def aggregate_average_data(
|
def aggregate_price_data(window_data: list[dict]) -> float | None:
|
||||||
window_data: list[dict],
|
|
||||||
config_entry: ConfigEntry,
|
|
||||||
) -> tuple[float | None, float | None]:
|
|
||||||
"""
|
"""
|
||||||
Calculate average and median price from window data.
|
Calculate average price from window data.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
window_data: List of price interval dictionaries with 'total' key.
|
window_data: List of price interval dictionaries with 'total' key
|
||||||
config_entry: Config entry to get display unit configuration.
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Tuple of (average price, median price) in display currency units,
|
Average price in minor currency units (cents/øre), or None if no prices
|
||||||
or (None, None) if no prices.
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
prices = [float(i["total"]) for i in window_data if "total" in i]
|
prices = [float(i["total"]) for i in window_data if "total" in i]
|
||||||
if not prices:
|
if not prices:
|
||||||
return None, None
|
return None
|
||||||
# Calculate both mean and median
|
# Return in minor currency units (cents/øre)
|
||||||
mean = calculate_mean(prices)
|
return round((sum(prices) / len(prices)) * 100, 2)
|
||||||
median = calculate_median(prices)
|
|
||||||
# Convert to display currency unit based on configuration
|
|
||||||
factor = get_display_unit_factor(config_entry)
|
|
||||||
return round(mean * factor, 2), round(median * factor, 2) if median is not None else None
|
|
||||||
|
|
||||||
|
|
||||||
def aggregate_level_data(window_data: list[dict]) -> str | None:
|
def aggregate_level_data(window_data: list[dict]) -> str | None:
|
||||||
|
|
@ -113,29 +101,25 @@ def aggregate_window_data(
|
||||||
value_type: str,
|
value_type: str,
|
||||||
threshold_low: float,
|
threshold_low: float,
|
||||||
threshold_high: float,
|
threshold_high: float,
|
||||||
config_entry: ConfigEntry,
|
|
||||||
) -> str | float | None:
|
) -> str | float | None:
|
||||||
"""
|
"""
|
||||||
Aggregate data from multiple intervals based on value type.
|
Aggregate data from multiple intervals based on value type.
|
||||||
|
|
||||||
Unified helper that routes to appropriate aggregation function.
|
Unified helper that routes to appropriate aggregation function.
|
||||||
|
|
||||||
NOTE: This function is legacy code - rolling_hour calculator has its own implementation.
|
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
window_data: List of price interval dictionaries.
|
window_data: List of price interval dictionaries
|
||||||
value_type: Type of value to aggregate ('price', 'level', or 'rating').
|
value_type: Type of value to aggregate ('price', 'level', or 'rating')
|
||||||
threshold_low: Low threshold for rating calculation.
|
threshold_low: Low threshold for rating calculation
|
||||||
threshold_high: High threshold for rating calculation.
|
threshold_high: High threshold for rating calculation
|
||||||
config_entry: Config entry to get display unit configuration.
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Aggregated value (price as float, level/rating as str), or None if no data.
|
Aggregated value (price as float, level/rating as str), or None if no data
|
||||||
|
|
||||||
"""
|
"""
|
||||||
# Map value types to aggregation functions
|
# Map value types to aggregation functions
|
||||||
aggregators: dict[str, Callable] = {
|
aggregators: dict[str, Callable] = {
|
||||||
"price": lambda data: aggregate_average_data(data, config_entry)[0], # Use only average from tuple
|
"price": lambda data: aggregate_price_data(data),
|
||||||
"level": lambda data: aggregate_level_data(data),
|
"level": lambda data: aggregate_level_data(data),
|
||||||
"rating": lambda data: aggregate_rating_data(data, threshold_low, threshold_high),
|
"rating": lambda data: aggregate_rating_data(data, threshold_low, threshold_high),
|
||||||
}
|
}
|
||||||
|
|
@ -162,7 +146,7 @@ def get_hourly_price_value(
|
||||||
Args:
|
Args:
|
||||||
coordinator_data: Coordinator data dict
|
coordinator_data: Coordinator data dict
|
||||||
hour_offset: Hour offset from current time (positive=future, negative=past)
|
hour_offset: Hour offset from current time (positive=future, negative=past)
|
||||||
in_euro: If True, return price in base currency (EUR), else minor (cents/øre)
|
in_euro: If True, return price in major currency (EUR), else minor (cents/øre)
|
||||||
time: TibberPricesTimeService instance (required)
|
time: TibberPricesTimeService instance (required)
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
|
|
|
||||||
|
|
@ -2,17 +2,15 @@
|
||||||
|
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
from typing import TYPE_CHECKING, cast
|
from typing import TYPE_CHECKING
|
||||||
|
|
||||||
from custom_components.tibber_prices.utils.average import (
|
from custom_components.tibber_prices.utils.average import (
|
||||||
|
calculate_current_leading_avg,
|
||||||
calculate_current_leading_max,
|
calculate_current_leading_max,
|
||||||
calculate_current_leading_mean,
|
|
||||||
calculate_current_leading_min,
|
calculate_current_leading_min,
|
||||||
|
calculate_current_trailing_avg,
|
||||||
calculate_current_trailing_max,
|
calculate_current_trailing_max,
|
||||||
calculate_current_trailing_mean,
|
|
||||||
calculate_current_trailing_min,
|
calculate_current_trailing_min,
|
||||||
calculate_mean,
|
|
||||||
calculate_median,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
|
|
@ -43,7 +41,6 @@ def get_value_getter_mapping( # noqa: PLR0913 - needs all calculators as parame
|
||||||
get_next_avg_n_hours_value: Callable[[int], float | None],
|
get_next_avg_n_hours_value: Callable[[int], float | None],
|
||||||
get_data_timestamp: Callable[[], datetime | None],
|
get_data_timestamp: Callable[[], datetime | None],
|
||||||
get_chart_data_export_value: Callable[[], str | None],
|
get_chart_data_export_value: Callable[[], str | None],
|
||||||
get_chart_metadata_value: Callable[[], str | None],
|
|
||||||
) -> dict[str, Callable]:
|
) -> dict[str, Callable]:
|
||||||
"""
|
"""
|
||||||
Build mapping from entity key to value getter callable.
|
Build mapping from entity key to value getter callable.
|
||||||
|
|
@ -64,20 +61,11 @@ def get_value_getter_mapping( # noqa: PLR0913 - needs all calculators as parame
|
||||||
get_next_avg_n_hours_value: Method for next N-hour average forecasts
|
get_next_avg_n_hours_value: Method for next N-hour average forecasts
|
||||||
get_data_timestamp: Method for data timestamp sensor
|
get_data_timestamp: Method for data timestamp sensor
|
||||||
get_chart_data_export_value: Method for chart data export sensor
|
get_chart_data_export_value: Method for chart data export sensor
|
||||||
get_chart_metadata_value: Method for chart metadata sensor
|
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Dictionary mapping entity keys to their value getter callables.
|
Dictionary mapping entity keys to their value getter callables.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def _minutes_to_hours(value: float | None) -> float | None:
|
|
||||||
"""Convert minutes to hours for duration-oriented sensors."""
|
|
||||||
if value is None:
|
|
||||||
return None
|
|
||||||
|
|
||||||
return value / 60
|
|
||||||
|
|
||||||
return {
|
return {
|
||||||
# ================================================================
|
# ================================================================
|
||||||
# INTERVAL-BASED SENSORS - via IntervalCalculator
|
# INTERVAL-BASED SENSORS - via IntervalCalculator
|
||||||
|
|
@ -94,7 +82,7 @@ def get_value_getter_mapping( # noqa: PLR0913 - needs all calculators as parame
|
||||||
"current_interval_price": lambda: interval_calculator.get_interval_value(
|
"current_interval_price": lambda: interval_calculator.get_interval_value(
|
||||||
interval_offset=0, value_type="price", in_euro=False
|
interval_offset=0, value_type="price", in_euro=False
|
||||||
),
|
),
|
||||||
"current_interval_price_base": lambda: interval_calculator.get_interval_value(
|
"current_interval_price_major": lambda: interval_calculator.get_interval_value(
|
||||||
interval_offset=0, value_type="price", in_euro=True
|
interval_offset=0, value_type="price", in_euro=True
|
||||||
),
|
),
|
||||||
"next_interval_price": lambda: interval_calculator.get_interval_value(
|
"next_interval_price": lambda: interval_calculator.get_interval_value(
|
||||||
|
|
@ -140,14 +128,14 @@ def get_value_getter_mapping( # noqa: PLR0913 - needs all calculators as parame
|
||||||
"highest_price_today": lambda: daily_stat_calculator.get_daily_stat_value(day="today", stat_func=max),
|
"highest_price_today": lambda: daily_stat_calculator.get_daily_stat_value(day="today", stat_func=max),
|
||||||
"average_price_today": lambda: daily_stat_calculator.get_daily_stat_value(
|
"average_price_today": lambda: daily_stat_calculator.get_daily_stat_value(
|
||||||
day="today",
|
day="today",
|
||||||
stat_func=lambda prices: (calculate_mean(prices), calculate_median(prices)),
|
stat_func=lambda prices: sum(prices) / len(prices),
|
||||||
),
|
),
|
||||||
# Tomorrow statistics sensors
|
# Tomorrow statistics sensors
|
||||||
"lowest_price_tomorrow": lambda: daily_stat_calculator.get_daily_stat_value(day="tomorrow", stat_func=min),
|
"lowest_price_tomorrow": lambda: daily_stat_calculator.get_daily_stat_value(day="tomorrow", stat_func=min),
|
||||||
"highest_price_tomorrow": lambda: daily_stat_calculator.get_daily_stat_value(day="tomorrow", stat_func=max),
|
"highest_price_tomorrow": lambda: daily_stat_calculator.get_daily_stat_value(day="tomorrow", stat_func=max),
|
||||||
"average_price_tomorrow": lambda: daily_stat_calculator.get_daily_stat_value(
|
"average_price_tomorrow": lambda: daily_stat_calculator.get_daily_stat_value(
|
||||||
day="tomorrow",
|
day="tomorrow",
|
||||||
stat_func=lambda prices: (calculate_mean(prices), calculate_median(prices)),
|
stat_func=lambda prices: sum(prices) / len(prices),
|
||||||
),
|
),
|
||||||
# Daily aggregated level sensors
|
# Daily aggregated level sensors
|
||||||
"yesterday_price_level": lambda: daily_stat_calculator.get_daily_aggregated_value(
|
"yesterday_price_level": lambda: daily_stat_calculator.get_daily_aggregated_value(
|
||||||
|
|
@ -172,10 +160,10 @@ def get_value_getter_mapping( # noqa: PLR0913 - needs all calculators as parame
|
||||||
# ================================================================
|
# ================================================================
|
||||||
# Trailing and leading average sensors
|
# Trailing and leading average sensors
|
||||||
"trailing_price_average": lambda: window_24h_calculator.get_24h_window_value(
|
"trailing_price_average": lambda: window_24h_calculator.get_24h_window_value(
|
||||||
stat_func=calculate_current_trailing_mean,
|
stat_func=calculate_current_trailing_avg,
|
||||||
),
|
),
|
||||||
"leading_price_average": lambda: window_24h_calculator.get_24h_window_value(
|
"leading_price_average": lambda: window_24h_calculator.get_24h_window_value(
|
||||||
stat_func=calculate_current_leading_mean,
|
stat_func=calculate_current_leading_avg,
|
||||||
),
|
),
|
||||||
# Trailing and leading min/max sensors
|
# Trailing and leading min/max sensors
|
||||||
"trailing_price_min": lambda: window_24h_calculator.get_24h_window_value(
|
"trailing_price_min": lambda: window_24h_calculator.get_24h_window_value(
|
||||||
|
|
@ -251,17 +239,11 @@ def get_value_getter_mapping( # noqa: PLR0913 - needs all calculators as parame
|
||||||
"best_price_end_time": lambda: timing_calculator.get_period_timing_value(
|
"best_price_end_time": lambda: timing_calculator.get_period_timing_value(
|
||||||
period_type="best_price", value_type="end_time"
|
period_type="best_price", value_type="end_time"
|
||||||
),
|
),
|
||||||
"best_price_period_duration": lambda: _minutes_to_hours(
|
"best_price_period_duration": lambda: timing_calculator.get_period_timing_value(
|
||||||
cast(
|
period_type="best_price", value_type="period_duration"
|
||||||
"float | None",
|
|
||||||
timing_calculator.get_period_timing_value(period_type="best_price", value_type="period_duration"),
|
|
||||||
)
|
|
||||||
),
|
),
|
||||||
"best_price_remaining_minutes": lambda: _minutes_to_hours(
|
"best_price_remaining_minutes": lambda: timing_calculator.get_period_timing_value(
|
||||||
cast(
|
period_type="best_price", value_type="remaining_minutes"
|
||||||
"float | None",
|
|
||||||
timing_calculator.get_period_timing_value(period_type="best_price", value_type="remaining_minutes"),
|
|
||||||
)
|
|
||||||
),
|
),
|
||||||
"best_price_progress": lambda: timing_calculator.get_period_timing_value(
|
"best_price_progress": lambda: timing_calculator.get_period_timing_value(
|
||||||
period_type="best_price", value_type="progress"
|
period_type="best_price", value_type="progress"
|
||||||
|
|
@ -269,27 +251,18 @@ def get_value_getter_mapping( # noqa: PLR0913 - needs all calculators as parame
|
||||||
"best_price_next_start_time": lambda: timing_calculator.get_period_timing_value(
|
"best_price_next_start_time": lambda: timing_calculator.get_period_timing_value(
|
||||||
period_type="best_price", value_type="next_start_time"
|
period_type="best_price", value_type="next_start_time"
|
||||||
),
|
),
|
||||||
"best_price_next_in_minutes": lambda: _minutes_to_hours(
|
"best_price_next_in_minutes": lambda: timing_calculator.get_period_timing_value(
|
||||||
cast(
|
period_type="best_price", value_type="next_in_minutes"
|
||||||
"float | None",
|
|
||||||
timing_calculator.get_period_timing_value(period_type="best_price", value_type="next_in_minutes"),
|
|
||||||
)
|
|
||||||
),
|
),
|
||||||
# Peak Price timing sensors
|
# Peak Price timing sensors
|
||||||
"peak_price_end_time": lambda: timing_calculator.get_period_timing_value(
|
"peak_price_end_time": lambda: timing_calculator.get_period_timing_value(
|
||||||
period_type="peak_price", value_type="end_time"
|
period_type="peak_price", value_type="end_time"
|
||||||
),
|
),
|
||||||
"peak_price_period_duration": lambda: _minutes_to_hours(
|
"peak_price_period_duration": lambda: timing_calculator.get_period_timing_value(
|
||||||
cast(
|
period_type="peak_price", value_type="period_duration"
|
||||||
"float | None",
|
|
||||||
timing_calculator.get_period_timing_value(period_type="peak_price", value_type="period_duration"),
|
|
||||||
)
|
|
||||||
),
|
),
|
||||||
"peak_price_remaining_minutes": lambda: _minutes_to_hours(
|
"peak_price_remaining_minutes": lambda: timing_calculator.get_period_timing_value(
|
||||||
cast(
|
period_type="peak_price", value_type="remaining_minutes"
|
||||||
"float | None",
|
|
||||||
timing_calculator.get_period_timing_value(period_type="peak_price", value_type="remaining_minutes"),
|
|
||||||
)
|
|
||||||
),
|
),
|
||||||
"peak_price_progress": lambda: timing_calculator.get_period_timing_value(
|
"peak_price_progress": lambda: timing_calculator.get_period_timing_value(
|
||||||
period_type="peak_price", value_type="progress"
|
period_type="peak_price", value_type="progress"
|
||||||
|
|
@ -297,14 +270,9 @@ def get_value_getter_mapping( # noqa: PLR0913 - needs all calculators as parame
|
||||||
"peak_price_next_start_time": lambda: timing_calculator.get_period_timing_value(
|
"peak_price_next_start_time": lambda: timing_calculator.get_period_timing_value(
|
||||||
period_type="peak_price", value_type="next_start_time"
|
period_type="peak_price", value_type="next_start_time"
|
||||||
),
|
),
|
||||||
"peak_price_next_in_minutes": lambda: _minutes_to_hours(
|
"peak_price_next_in_minutes": lambda: timing_calculator.get_period_timing_value(
|
||||||
cast(
|
period_type="peak_price", value_type="next_in_minutes"
|
||||||
"float | None",
|
|
||||||
timing_calculator.get_period_timing_value(period_type="peak_price", value_type="next_in_minutes"),
|
|
||||||
)
|
|
||||||
),
|
),
|
||||||
# Chart data export sensor
|
# Chart data export sensor
|
||||||
"chart_data_export": get_chart_data_export_value,
|
"chart_data_export": get_chart_data_export_value,
|
||||||
# Chart metadata sensor
|
|
||||||
"chart_metadata": get_chart_metadata_value,
|
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,30 +1,54 @@
|
||||||
get_price:
|
get_price:
|
||||||
|
name: Get Price Data
|
||||||
|
description: >-
|
||||||
|
Fetch price data for a specific time range with automatic routing. Development and testing service for the price_info_for_range API function. Automatically uses PRICE_INFO, PRICE_INFO_RANGE, or both based on the time range boundary.
|
||||||
fields:
|
fields:
|
||||||
entry_id:
|
entry_id:
|
||||||
|
name: Entry ID
|
||||||
|
description: The config entry ID for the Tibber integration.
|
||||||
required: true
|
required: true
|
||||||
example: "1234567890abcdef"
|
example: "1234567890abcdef"
|
||||||
selector:
|
selector:
|
||||||
config_entry:
|
config_entry:
|
||||||
integration: tibber_prices
|
integration: tibber_prices
|
||||||
start_time:
|
start_time:
|
||||||
|
name: Start Time
|
||||||
|
description: Start of the time range (inclusive, timezone-aware).
|
||||||
required: true
|
required: true
|
||||||
example: "2025-11-01T00:00:00+01:00"
|
example: "2025-11-01T00:00:00+01:00"
|
||||||
selector:
|
selector:
|
||||||
datetime:
|
datetime:
|
||||||
end_time:
|
end_time:
|
||||||
|
name: End Time
|
||||||
|
description: End of the time range (exclusive, timezone-aware).
|
||||||
required: true
|
required: true
|
||||||
example: "2025-11-02T00:00:00+01:00"
|
example: "2025-11-02T00:00:00+01:00"
|
||||||
selector:
|
selector:
|
||||||
datetime:
|
datetime:
|
||||||
get_apexcharts_yaml:
|
get_apexcharts_yaml:
|
||||||
|
name: Get ApexCharts Card YAML
|
||||||
|
description: >-
|
||||||
|
⚠️ IMPORTANT: This service generates a BASIC EXAMPLE configuration for ApexCharts Card as a starting point. It is NOT a complete solution for all ApexCharts features.
|
||||||
|
This integration is primarily a DATA PROVIDER. The generated YAML demonstrates how to use the `get_chartdata` service to fetch price data. Due to the segmented nature of our data (different time periods per series) and the use of Home Assistant's service API instead of entity attributes, many advanced ApexCharts features (like in_header, certain transformations) are not compatible or require manual customization.
|
||||||
|
|
||||||
|
|
||||||
|
You are welcome to customize the generated YAML for your specific needs, but please understand that comprehensive ApexCharts configuration support is beyond the scope of this integration. Community contributions with improved configurations are always appreciated - if you find a better setup that works, please share it so everyone can benefit!
|
||||||
|
|
||||||
|
|
||||||
|
For direct data access to build your own charts, use the `get_chartdata` service instead.
|
||||||
fields:
|
fields:
|
||||||
entry_id:
|
entry_id:
|
||||||
|
name: Entry ID
|
||||||
|
description: The config entry ID for the Tibber integration.
|
||||||
required: true
|
required: true
|
||||||
example: "1234567890abcdef"
|
example: "1234567890abcdef"
|
||||||
selector:
|
selector:
|
||||||
config_entry:
|
config_entry:
|
||||||
integration: tibber_prices
|
integration: tibber_prices
|
||||||
day:
|
day:
|
||||||
|
name: Day
|
||||||
|
description: >-
|
||||||
|
Which day to visualize (yesterday, today, or tomorrow). If not specified, returns a rolling 2-day window: today+tomorrow (when tomorrow data is available) or yesterday+today (when tomorrow data is not yet available).
|
||||||
required: false
|
required: false
|
||||||
example: today
|
example: today
|
||||||
selector:
|
selector:
|
||||||
|
|
@ -33,10 +57,11 @@ get_apexcharts_yaml:
|
||||||
- yesterday
|
- yesterday
|
||||||
- today
|
- today
|
||||||
- tomorrow
|
- tomorrow
|
||||||
- rolling_window
|
|
||||||
- rolling_window_autozoom
|
|
||||||
translation_key: day
|
translation_key: day
|
||||||
level_type:
|
level_type:
|
||||||
|
name: Level Type
|
||||||
|
description: >-
|
||||||
|
Select which price level classification to visualize: 'rating_level' (LOW/NORMAL/HIGH based on your configured thresholds) or 'level' (Tibber API levels: VERY_CHEAP/CHEAP/NORMAL/EXPENSIVE/VERY_EXPENSIVE).
|
||||||
required: false
|
required: false
|
||||||
default: rating_level
|
default: rating_level
|
||||||
example: rating_level
|
example: rating_level
|
||||||
|
|
@ -46,33 +71,25 @@ get_apexcharts_yaml:
|
||||||
- rating_level
|
- rating_level
|
||||||
- level
|
- level
|
||||||
translation_key: level_type
|
translation_key: level_type
|
||||||
resolution:
|
|
||||||
required: false
|
|
||||||
default: interval
|
|
||||||
example: interval
|
|
||||||
selector:
|
|
||||||
select:
|
|
||||||
options:
|
|
||||||
- interval
|
|
||||||
- hourly
|
|
||||||
translation_key: resolution
|
|
||||||
highlight_best_price:
|
highlight_best_price:
|
||||||
|
name: Highlight Best Price Periods
|
||||||
|
description: >-
|
||||||
|
Add a semi-transparent green overlay to highlight the best price periods on the chart. This makes it easy to visually identify the optimal times for energy consumption.
|
||||||
required: false
|
required: false
|
||||||
default: true
|
default: true
|
||||||
example: true
|
example: true
|
||||||
selector:
|
selector:
|
||||||
boolean:
|
boolean:
|
||||||
highlight_peak_price:
|
|
||||||
required: false
|
|
||||||
default: false
|
|
||||||
example: false
|
|
||||||
selector:
|
|
||||||
boolean:
|
|
||||||
get_chartdata:
|
get_chartdata:
|
||||||
|
name: Get Chart Data
|
||||||
|
description: >-
|
||||||
|
Returns price data in a chart-friendly format compatible with Tibber Core output. Works with ha-price-timeline-card, ApexCharts, Plotly, Mini Graph Card, and History Graph. Field names and structure are configurable.
|
||||||
fields:
|
fields:
|
||||||
general:
|
general:
|
||||||
fields:
|
fields:
|
||||||
entry_id:
|
entry_id:
|
||||||
|
name: Entry ID
|
||||||
|
description: The config entry ID for the Tibber integration.
|
||||||
required: true
|
required: true
|
||||||
example: "1234567890abcdef"
|
example: "1234567890abcdef"
|
||||||
selector:
|
selector:
|
||||||
|
|
@ -82,6 +99,9 @@ get_chartdata:
|
||||||
collapsed: true
|
collapsed: true
|
||||||
fields:
|
fields:
|
||||||
day:
|
day:
|
||||||
|
name: Day
|
||||||
|
description: >-
|
||||||
|
Which day(s) to fetch prices for. You can select multiple days. If not specified, returns a rolling 2-day window: today+tomorrow (when tomorrow data is available) or yesterday+today (when tomorrow data is not yet available). This provides continuous chart display without gaps.
|
||||||
required: false
|
required: false
|
||||||
selector:
|
selector:
|
||||||
select:
|
select:
|
||||||
|
|
@ -92,6 +112,9 @@ get_chartdata:
|
||||||
multiple: true
|
multiple: true
|
||||||
translation_key: day
|
translation_key: day
|
||||||
resolution:
|
resolution:
|
||||||
|
name: Resolution
|
||||||
|
description: >-
|
||||||
|
Time resolution for the returned data. Options: 'interval' (default, 15-minute intervals, 96 points per day), 'hourly' (hourly averages, 24 points per day).
|
||||||
required: false
|
required: false
|
||||||
default: interval
|
default: interval
|
||||||
example: hourly
|
example: hourly
|
||||||
|
|
@ -105,6 +128,9 @@ get_chartdata:
|
||||||
collapsed: true
|
collapsed: true
|
||||||
fields:
|
fields:
|
||||||
level_filter:
|
level_filter:
|
||||||
|
name: Level Filter
|
||||||
|
description: >-
|
||||||
|
Filter intervals to include only specific Tibber price levels (very_cheap, cheap, normal, expensive, very_expensive). If not specified, all levels are included.
|
||||||
required: false
|
required: false
|
||||||
selector:
|
selector:
|
||||||
select:
|
select:
|
||||||
|
|
@ -117,6 +143,9 @@ get_chartdata:
|
||||||
multiple: true
|
multiple: true
|
||||||
translation_key: level_filter
|
translation_key: level_filter
|
||||||
rating_level_filter:
|
rating_level_filter:
|
||||||
|
name: Rating Level Filter
|
||||||
|
description: >-
|
||||||
|
Filter intervals to include only specific rating levels (low, normal, high). If not specified, all rating levels are included.
|
||||||
required: false
|
required: false
|
||||||
selector:
|
selector:
|
||||||
select:
|
select:
|
||||||
|
|
@ -127,6 +156,9 @@ get_chartdata:
|
||||||
multiple: true
|
multiple: true
|
||||||
translation_key: rating_level_filter
|
translation_key: rating_level_filter
|
||||||
period_filter:
|
period_filter:
|
||||||
|
name: Period Filter
|
||||||
|
description: >-
|
||||||
|
Filter intervals to include only those within Best Price or Peak Price periods. Options: 'best_price' (only intervals in Best Price periods), 'peak_price' (only intervals in Peak Price periods). If not specified, all intervals are included. This uses the precomputed period data from binary sensors (binary_sensor.best_price_period / binary_sensor.peak_price_period).
|
||||||
required: false
|
required: false
|
||||||
selector:
|
selector:
|
||||||
select:
|
select:
|
||||||
|
|
@ -137,13 +169,19 @@ get_chartdata:
|
||||||
transformation:
|
transformation:
|
||||||
collapsed: true
|
collapsed: true
|
||||||
fields:
|
fields:
|
||||||
subunit_currency:
|
minor_currency:
|
||||||
|
name: Minor Currency
|
||||||
|
description: >-
|
||||||
|
Return prices in minor currency units (cents for EUR, øre for NOK/SEK) instead of major currency units. Disabled by default.
|
||||||
required: false
|
required: false
|
||||||
default: false
|
default: false
|
||||||
example: true
|
example: true
|
||||||
selector:
|
selector:
|
||||||
boolean:
|
boolean:
|
||||||
round_decimals:
|
round_decimals:
|
||||||
|
name: Round Decimals
|
||||||
|
description: >-
|
||||||
|
Number of decimal places to round prices to (0-10). If not specified, uses default precision (4 decimals for major currency, 2 for minor currency).
|
||||||
required: false
|
required: false
|
||||||
example: 2
|
example: 2
|
||||||
selector:
|
selector:
|
||||||
|
|
@ -152,6 +190,12 @@ get_chartdata:
|
||||||
max: 10
|
max: 10
|
||||||
mode: box
|
mode: box
|
||||||
insert_nulls:
|
insert_nulls:
|
||||||
|
name: Insert NULL Values
|
||||||
|
description: >-
|
||||||
|
NULL insertion mode for filtered data.
|
||||||
|
• none (default): only matching intervals
|
||||||
|
• segments: add NULLs at segment boundaries (clean gaps)
|
||||||
|
• all: NULL for every non-matching timestamp
|
||||||
required: false
|
required: false
|
||||||
default: none
|
default: none
|
||||||
selector:
|
selector:
|
||||||
|
|
@ -162,11 +206,19 @@ get_chartdata:
|
||||||
- all
|
- all
|
||||||
translation_key: insert_nulls
|
translation_key: insert_nulls
|
||||||
connect_segments:
|
connect_segments:
|
||||||
|
name: Connect Segments
|
||||||
|
description: >-
|
||||||
|
Only with insert_nulls='segments'. Adds boundary points to visually connect segments (stepline).
|
||||||
|
• Downward boundary: lower price at end of current segment
|
||||||
|
• Upward boundary: hold previous price before the gap
|
||||||
required: false
|
required: false
|
||||||
default: false
|
default: false
|
||||||
selector:
|
selector:
|
||||||
boolean:
|
boolean:
|
||||||
add_trailing_null:
|
add_trailing_null:
|
||||||
|
name: Add Trailing Null Point
|
||||||
|
description: >-
|
||||||
|
Add a final data point with null values (except timestamp) at the end. Some chart libraries need this to prevent extrapolation/interpolation to the viewport edge when using stepline rendering. Leave disabled unless your chart requires it.
|
||||||
required: false
|
required: false
|
||||||
default: false
|
default: false
|
||||||
selector:
|
selector:
|
||||||
|
|
@ -175,6 +227,11 @@ get_chartdata:
|
||||||
collapsed: true
|
collapsed: true
|
||||||
fields:
|
fields:
|
||||||
output_format:
|
output_format:
|
||||||
|
name: Output Format
|
||||||
|
description: >-
|
||||||
|
Output format.
|
||||||
|
• array_of_objects (default): objects with configurable field names
|
||||||
|
• array_of_arrays: [timestamp, price] tuples
|
||||||
required: false
|
required: false
|
||||||
default: array_of_objects
|
default: array_of_objects
|
||||||
example: array_of_objects
|
example: array_of_objects
|
||||||
|
|
@ -185,64 +242,84 @@ get_chartdata:
|
||||||
- array_of_arrays
|
- array_of_arrays
|
||||||
translation_key: output_format
|
translation_key: output_format
|
||||||
data_key:
|
data_key:
|
||||||
|
name: Data Key
|
||||||
|
description: >-
|
||||||
|
Custom name for the top-level data key in the response. Defaults to "data" if not specified. For ApexCharts compatibility with array_of_arrays, use "points".
|
||||||
required: false
|
required: false
|
||||||
example: prices
|
example: prices
|
||||||
selector:
|
selector:
|
||||||
text:
|
text:
|
||||||
metadata:
|
|
||||||
required: false
|
|
||||||
default: include
|
|
||||||
selector:
|
|
||||||
select:
|
|
||||||
options:
|
|
||||||
- include
|
|
||||||
- only
|
|
||||||
- none
|
|
||||||
translation_key: metadata
|
|
||||||
arrays_of_objects:
|
arrays_of_objects:
|
||||||
collapsed: true
|
collapsed: true
|
||||||
fields:
|
fields:
|
||||||
include_level:
|
include_level:
|
||||||
|
name: Include Level
|
||||||
|
description: >-
|
||||||
|
Include Tibber price level (VERY_CHEAP … VERY_EXPENSIVE).
|
||||||
required: false
|
required: false
|
||||||
default: false
|
default: false
|
||||||
example: true
|
example: true
|
||||||
selector:
|
selector:
|
||||||
boolean:
|
boolean:
|
||||||
include_rating_level:
|
include_rating_level:
|
||||||
|
name: Include Rating Level
|
||||||
|
description: >-
|
||||||
|
Include rating level (LOW/NORMAL/HIGH) based on configured thresholds.
|
||||||
required: false
|
required: false
|
||||||
default: false
|
default: false
|
||||||
example: true
|
example: true
|
||||||
selector:
|
selector:
|
||||||
boolean:
|
boolean:
|
||||||
include_average:
|
include_average:
|
||||||
|
name: Include Average
|
||||||
|
description: >-
|
||||||
|
Include daily average price.
|
||||||
required: false
|
required: false
|
||||||
default: false
|
default: false
|
||||||
selector:
|
selector:
|
||||||
boolean:
|
boolean:
|
||||||
start_time_field:
|
start_time_field:
|
||||||
|
name: Start Time Field Name
|
||||||
|
description: >-
|
||||||
|
Custom name for the start time field in the output. Defaults to "start_time" if not specified.
|
||||||
required: false
|
required: false
|
||||||
example: time
|
example: time
|
||||||
selector:
|
selector:
|
||||||
text:
|
text:
|
||||||
end_time_field:
|
end_time_field:
|
||||||
|
name: End Time Field Name
|
||||||
|
description: >-
|
||||||
|
Custom name for the end time field in the output. Defaults to "end_time" if not specified. Only used with period_filter.
|
||||||
required: false
|
required: false
|
||||||
example: end
|
example: end
|
||||||
selector:
|
selector:
|
||||||
text:
|
text:
|
||||||
price_field:
|
price_field:
|
||||||
|
name: Price Field Name
|
||||||
|
description: >-
|
||||||
|
Custom name for the price field in the output. Defaults to "price_per_kwh" if not specified.
|
||||||
required: false
|
required: false
|
||||||
example: price
|
example: price
|
||||||
selector:
|
selector:
|
||||||
text:
|
text:
|
||||||
level_field:
|
level_field:
|
||||||
|
name: Level Field Name
|
||||||
|
description: >-
|
||||||
|
Custom name for the level field in the output. Defaults to "level" if not specified. Only used when include_level is enabled.
|
||||||
required: false
|
required: false
|
||||||
selector:
|
selector:
|
||||||
text:
|
text:
|
||||||
rating_level_field:
|
rating_level_field:
|
||||||
|
name: Rating Level Field Name
|
||||||
|
description: >-
|
||||||
|
Custom name for the rating_level field in the output. Defaults to "rating_level" if not specified. Only used when include_rating_level is enabled.
|
||||||
required: false
|
required: false
|
||||||
selector:
|
selector:
|
||||||
text:
|
text:
|
||||||
average_field:
|
average_field:
|
||||||
|
name: Average Field Name
|
||||||
|
description: >-
|
||||||
|
Custom name for the average field in the output. Defaults to "average" if not specified. Only used when include_average is enabled.
|
||||||
required: false
|
required: false
|
||||||
selector:
|
selector:
|
||||||
text:
|
text:
|
||||||
|
|
@ -250,23 +327,24 @@ get_chartdata:
|
||||||
collapsed: true
|
collapsed: true
|
||||||
fields:
|
fields:
|
||||||
array_fields:
|
array_fields:
|
||||||
|
name: Array Fields
|
||||||
|
description: >-
|
||||||
|
Choose extra fields to include using {field} syntax, comma-separated.
|
||||||
|
Available: start_time, price_per_kwh, level, rating_level, average.
|
||||||
|
Empty = default (timestamp + price).
|
||||||
required: false
|
required: false
|
||||||
selector:
|
selector:
|
||||||
text:
|
text:
|
||||||
refresh_user_data:
|
refresh_user_data:
|
||||||
|
name: Refresh User Data
|
||||||
|
description: >-
|
||||||
|
Forces a refresh of the user data (homes, profile information) from the Tibber API. This can be useful after making changes to your Tibber account or when troubleshooting connectivity issues.
|
||||||
fields:
|
fields:
|
||||||
entry_id:
|
entry_id:
|
||||||
|
name: Entry ID
|
||||||
|
description: The config entry ID for the Tibber integration.
|
||||||
required: true
|
required: true
|
||||||
example: "1234567890abcdef"
|
example: "1234567890abcdef"
|
||||||
selector:
|
selector:
|
||||||
config_entry:
|
config_entry:
|
||||||
integration: tibber_prices
|
integration: tibber_prices
|
||||||
|
|
||||||
debug_clear_tomorrow:
|
|
||||||
fields:
|
|
||||||
entry_id:
|
|
||||||
required: false
|
|
||||||
example: "1234567890abcdef"
|
|
||||||
selector:
|
|
||||||
config_entry:
|
|
||||||
integration: tibber_prices
|
|
||||||
|
|
|
||||||
|
|
@ -5,7 +5,6 @@ This package provides service endpoints for external integrations and data expor
|
||||||
- Chart data export (get_chartdata)
|
- Chart data export (get_chartdata)
|
||||||
- ApexCharts YAML generation (get_apexcharts_yaml)
|
- ApexCharts YAML generation (get_apexcharts_yaml)
|
||||||
- User data refresh (refresh_user_data)
|
- User data refresh (refresh_user_data)
|
||||||
- Debug: Clear tomorrow data (debug_clear_tomorrow) - DevContainer only
|
|
||||||
|
|
||||||
Architecture:
|
Architecture:
|
||||||
- helpers.py: Common utilities (get_entry_and_data)
|
- helpers.py: Common utilities (get_entry_and_data)
|
||||||
|
|
@ -13,13 +12,11 @@ Architecture:
|
||||||
- chartdata.py: Main data export service handler
|
- chartdata.py: Main data export service handler
|
||||||
- apexcharts.py: ApexCharts card YAML generator
|
- apexcharts.py: ApexCharts card YAML generator
|
||||||
- refresh_user_data.py: User data refresh handler
|
- refresh_user_data.py: User data refresh handler
|
||||||
- debug_clear_tomorrow.py: Debug tool for testing tomorrow refresh (dev only)
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
import os
|
|
||||||
from typing import TYPE_CHECKING
|
from typing import TYPE_CHECKING
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import DOMAIN
|
from custom_components.tibber_prices.const import DOMAIN
|
||||||
|
|
@ -45,9 +42,6 @@ __all__ = [
|
||||||
"async_setup_services",
|
"async_setup_services",
|
||||||
]
|
]
|
||||||
|
|
||||||
# Check if running in development mode (DevContainer)
|
|
||||||
_IS_DEV_MODE = os.environ.get("TIBBER_PRICES_DEV") == "1"
|
|
||||||
|
|
||||||
|
|
||||||
@callback
|
@callback
|
||||||
def async_setup_services(hass: HomeAssistant) -> None:
|
def async_setup_services(hass: HomeAssistant) -> None:
|
||||||
|
|
@ -80,19 +74,3 @@ def async_setup_services(hass: HomeAssistant) -> None:
|
||||||
schema=REFRESH_USER_DATA_SERVICE_SCHEMA,
|
schema=REFRESH_USER_DATA_SERVICE_SCHEMA,
|
||||||
supports_response=SupportsResponse.ONLY,
|
supports_response=SupportsResponse.ONLY,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Debug services - only available in DevContainer (TIBBER_PRICES_DEV=1)
|
|
||||||
if _IS_DEV_MODE:
|
|
||||||
from .debug_clear_tomorrow import ( # noqa: PLC0415 - Conditional import for dev-only service
|
|
||||||
DEBUG_CLEAR_TOMORROW_SERVICE_NAME,
|
|
||||||
DEBUG_CLEAR_TOMORROW_SERVICE_SCHEMA,
|
|
||||||
handle_debug_clear_tomorrow,
|
|
||||||
)
|
|
||||||
|
|
||||||
hass.services.async_register(
|
|
||||||
DOMAIN,
|
|
||||||
DEBUG_CLEAR_TOMORROW_SERVICE_NAME,
|
|
||||||
handle_debug_clear_tomorrow,
|
|
||||||
schema=DEBUG_CLEAR_TOMORROW_SERVICE_SCHEMA,
|
|
||||||
supports_response=SupportsResponse.ONLY,
|
|
||||||
)
|
|
||||||
|
|
|
||||||
|
|
@ -1,238 +0,0 @@
|
||||||
"""
|
|
||||||
Debug service to clear tomorrow's data from the interval pool.
|
|
||||||
|
|
||||||
This service is intended for testing the tomorrow data refresh cycle without
|
|
||||||
having to wait for the next day or restart Home Assistant.
|
|
||||||
|
|
||||||
WARNING: This is a debug/development tool. Use with caution in production.
|
|
||||||
|
|
||||||
Usage:
|
|
||||||
service: tibber_prices.debug_clear_tomorrow
|
|
||||||
data: {}
|
|
||||||
|
|
||||||
After calling this service:
|
|
||||||
1. The tomorrow data will be removed from the interval pool
|
|
||||||
2. The lifecycle sensor will show "searching_tomorrow" (after 13:00)
|
|
||||||
3. The next Timer #1 cycle will fetch tomorrow data from the API
|
|
||||||
4. You can observe the full refresh cycle in real-time
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
import logging
|
|
||||||
from datetime import datetime, timedelta
|
|
||||||
from typing import TYPE_CHECKING, Any
|
|
||||||
|
|
||||||
import voluptuous as vol
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import DOMAIN
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
from custom_components.tibber_prices.coordinator import TibberPricesDataUpdateCoordinator
|
|
||||||
from homeassistant.core import ServiceCall, ServiceResponse
|
|
||||||
|
|
||||||
_LOGGER = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
DEBUG_CLEAR_TOMORROW_SERVICE_NAME = "debug_clear_tomorrow"
|
|
||||||
DEBUG_CLEAR_TOMORROW_SERVICE_SCHEMA = vol.Schema(
|
|
||||||
{
|
|
||||||
vol.Optional("entry_id"): str,
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
async def handle_debug_clear_tomorrow(call: ServiceCall) -> ServiceResponse:
|
|
||||||
"""
|
|
||||||
Handle the debug_clear_tomorrow service call.
|
|
||||||
|
|
||||||
Removes tomorrow's intervals from the interval pool to allow testing
|
|
||||||
of the tomorrow data refresh cycle.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Dict with operation results (intervals removed, pool stats before/after).
|
|
||||||
|
|
||||||
"""
|
|
||||||
hass = call.hass
|
|
||||||
|
|
||||||
# Get entry_id from call data or use first available
|
|
||||||
entry_id = call.data.get("entry_id")
|
|
||||||
|
|
||||||
if entry_id:
|
|
||||||
entry = next(
|
|
||||||
(e for e in hass.config_entries.async_entries(DOMAIN) if e.entry_id == entry_id),
|
|
||||||
None,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
# Use first available entry
|
|
||||||
entries = hass.config_entries.async_entries(DOMAIN)
|
|
||||||
entry = entries[0] if entries else None
|
|
||||||
|
|
||||||
if not entry or not hasattr(entry, "runtime_data") or not entry.runtime_data:
|
|
||||||
return {"success": False, "error": "No valid config entry found"}
|
|
||||||
|
|
||||||
coordinator: TibberPricesDataUpdateCoordinator = entry.runtime_data.coordinator
|
|
||||||
|
|
||||||
# Get pool manager from coordinator
|
|
||||||
pool = coordinator._price_data_manager._interval_pool # noqa: SLF001
|
|
||||||
|
|
||||||
# Get stats before
|
|
||||||
stats_before = pool.get_pool_stats()
|
|
||||||
|
|
||||||
# Calculate tomorrow's date range
|
|
||||||
now = coordinator.time.now()
|
|
||||||
now_local = coordinator.time.as_local(now)
|
|
||||||
tomorrow_start = (now_local + timedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0)
|
|
||||||
tomorrow_end = (now_local + timedelta(days=2)).replace(hour=0, minute=0, second=0, microsecond=0)
|
|
||||||
|
|
||||||
_LOGGER.info(
|
|
||||||
"DEBUG: Clearing tomorrow's data from pool (range: %s to %s)",
|
|
||||||
tomorrow_start.isoformat(),
|
|
||||||
tomorrow_end.isoformat(),
|
|
||||||
)
|
|
||||||
|
|
||||||
# Remove tomorrow's intervals from the pool index
|
|
||||||
removed_count = await _clear_intervals_in_range(pool, tomorrow_start.isoformat(), tomorrow_end.isoformat())
|
|
||||||
|
|
||||||
# Also remove tomorrow's intervals from coordinator.data["priceInfo"]
|
|
||||||
# This ensures sensors show "unknown" for tomorrow data
|
|
||||||
removed_from_coordinator = _clear_intervals_from_coordinator(coordinator, tomorrow_start, tomorrow_end)
|
|
||||||
|
|
||||||
# Get stats after
|
|
||||||
stats_after = pool.get_pool_stats()
|
|
||||||
|
|
||||||
# Force coordinator to re-check tomorrow data status and update ALL sensors
|
|
||||||
# This updates the lifecycle sensor and makes tomorrow sensors show "unknown"
|
|
||||||
coordinator.async_update_listeners()
|
|
||||||
|
|
||||||
result: dict[str, Any] = {
|
|
||||||
"success": True,
|
|
||||||
"intervals_removed_from_pool": removed_count,
|
|
||||||
"intervals_removed_from_coordinator": removed_from_coordinator,
|
|
||||||
"tomorrow_range": {
|
|
||||||
"start": tomorrow_start.isoformat(),
|
|
||||||
"end": tomorrow_end.isoformat(),
|
|
||||||
},
|
|
||||||
"pool_stats_before": {
|
|
||||||
"cache_intervals_total": stats_before.get("cache_intervals_total"),
|
|
||||||
"cache_newest_interval": stats_before.get("cache_newest_interval"),
|
|
||||||
},
|
|
||||||
"pool_stats_after": {
|
|
||||||
"cache_intervals_total": stats_after.get("cache_intervals_total"),
|
|
||||||
"cache_newest_interval": stats_after.get("cache_newest_interval"),
|
|
||||||
},
|
|
||||||
"message": f"Removed {removed_count} tomorrow intervals. Next Timer #1 cycle will fetch new data.",
|
|
||||||
}
|
|
||||||
|
|
||||||
_LOGGER.info("DEBUG: Clear tomorrow complete - %s", result)
|
|
||||||
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def _clear_intervals_from_coordinator(
|
|
||||||
coordinator: TibberPricesDataUpdateCoordinator,
|
|
||||||
start_dt: datetime,
|
|
||||||
end_dt: datetime,
|
|
||||||
) -> int:
|
|
||||||
"""
|
|
||||||
Remove intervals from coordinator.data["priceInfo"] in the given time range.
|
|
||||||
|
|
||||||
This ensures sensors show "unknown" for the removed intervals.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
coordinator: TibberPricesDataUpdateCoordinator instance.
|
|
||||||
start_dt: Start datetime (inclusive).
|
|
||||||
end_dt: End datetime (exclusive).
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Number of intervals removed.
|
|
||||||
|
|
||||||
"""
|
|
||||||
if not coordinator.data or "priceInfo" not in coordinator.data:
|
|
||||||
return 0
|
|
||||||
|
|
||||||
price_info = coordinator.data["priceInfo"]
|
|
||||||
original_count = len(price_info)
|
|
||||||
|
|
||||||
# Filter out intervals in the range
|
|
||||||
# Intervals have startsAt as datetime objects (after parse_all_timestamps)
|
|
||||||
filtered = []
|
|
||||||
for interval in price_info:
|
|
||||||
starts_at = interval.get("startsAt")
|
|
||||||
if starts_at is None:
|
|
||||||
filtered.append(interval)
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Handle both datetime and string formats
|
|
||||||
starts_at_dt = datetime.fromisoformat(starts_at) if isinstance(starts_at, str) else starts_at
|
|
||||||
|
|
||||||
# Keep intervals outside the removal range
|
|
||||||
if starts_at_dt < start_dt or starts_at_dt >= end_dt:
|
|
||||||
filtered.append(interval)
|
|
||||||
|
|
||||||
# Update coordinator.data in place
|
|
||||||
coordinator.data["priceInfo"] = filtered
|
|
||||||
|
|
||||||
removed_count = original_count - len(filtered)
|
|
||||||
_LOGGER.debug(
|
|
||||||
"DEBUG: Removed %d intervals from coordinator.data (had %d, now %d)",
|
|
||||||
removed_count,
|
|
||||||
original_count,
|
|
||||||
len(filtered),
|
|
||||||
)
|
|
||||||
|
|
||||||
return removed_count
|
|
||||||
|
|
||||||
|
|
||||||
async def _clear_intervals_in_range(
|
|
||||||
pool: Any,
|
|
||||||
start_iso: str,
|
|
||||||
end_iso: str,
|
|
||||||
) -> int:
|
|
||||||
"""
|
|
||||||
Remove intervals in the given time range from the pool.
|
|
||||||
|
|
||||||
This manipulates the pool's internal cache to remove specific intervals.
|
|
||||||
Used only for debug/testing purposes.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
pool: IntervalPoolManager instance.
|
|
||||||
start_iso: ISO timestamp string (inclusive).
|
|
||||||
end_iso: ISO timestamp string (exclusive).
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Number of intervals removed.
|
|
||||||
|
|
||||||
"""
|
|
||||||
# Access internal index
|
|
||||||
index = pool._index # noqa: SLF001
|
|
||||||
|
|
||||||
# Parse range
|
|
||||||
start_dt = datetime.fromisoformat(start_iso)
|
|
||||||
end_dt = datetime.fromisoformat(end_iso)
|
|
||||||
|
|
||||||
# Find all timestamps in range
|
|
||||||
removed_count = 0
|
|
||||||
current_dt = start_dt
|
|
||||||
|
|
||||||
while current_dt < end_dt:
|
|
||||||
current_key = current_dt.isoformat()[:19]
|
|
||||||
|
|
||||||
# Check if this timestamp exists in index
|
|
||||||
location = index.get(current_key)
|
|
||||||
if location is not None:
|
|
||||||
# Remove from index
|
|
||||||
index.remove(current_key)
|
|
||||||
removed_count += 1
|
|
||||||
|
|
||||||
# Move to next 15-min interval
|
|
||||||
current_dt += timedelta(minutes=15)
|
|
||||||
|
|
||||||
# Note: We only remove from the index, not from the fetch_groups.
|
|
||||||
# The intervals will remain in fetch_groups but won't be found via index lookup.
|
|
||||||
# This is simpler and safe - GC will clean up orphaned intervals eventually.
|
|
||||||
|
|
||||||
# Persist the updated pool state via manager's save method
|
|
||||||
await pool._auto_save_pool_state() # noqa: SLF001
|
|
||||||
|
|
||||||
return removed_count
|
|
||||||
|
|
@ -20,12 +20,9 @@ Used by:
|
||||||
|
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
from datetime import datetime, time
|
|
||||||
from typing import Any
|
from typing import Any
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import (
|
from custom_components.tibber_prices.const import (
|
||||||
CONF_AVERAGE_SENSOR_DISPLAY,
|
|
||||||
DEFAULT_AVERAGE_SENSOR_DISPLAY,
|
|
||||||
DEFAULT_PRICE_RATING_THRESHOLD_HIGH,
|
DEFAULT_PRICE_RATING_THRESHOLD_HIGH,
|
||||||
DEFAULT_PRICE_RATING_THRESHOLD_LOW,
|
DEFAULT_PRICE_RATING_THRESHOLD_LOW,
|
||||||
get_translation,
|
get_translation,
|
||||||
|
|
@ -34,7 +31,6 @@ from custom_components.tibber_prices.coordinator.helpers import (
|
||||||
get_intervals_for_day_offsets,
|
get_intervals_for_day_offsets,
|
||||||
)
|
)
|
||||||
from custom_components.tibber_prices.sensor.helpers import aggregate_level_data, aggregate_rating_data
|
from custom_components.tibber_prices.sensor.helpers import aggregate_level_data, aggregate_rating_data
|
||||||
from custom_components.tibber_prices.utils.average import calculate_mean, calculate_median
|
|
||||||
|
|
||||||
|
|
||||||
def normalize_level_filter(value: list[str] | None) -> list[str] | None:
|
def normalize_level_filter(value: list[str] | None) -> list[str] | None:
|
||||||
|
|
@ -51,106 +47,13 @@ def normalize_rating_level_filter(value: list[str] | None) -> list[str] | None:
|
||||||
return [v.upper() for v in value]
|
return [v.upper() for v in value]
|
||||||
|
|
||||||
|
|
||||||
def aggregate_to_hourly( # noqa: PLR0912
|
|
||||||
intervals: list[dict],
|
|
||||||
coordinator: Any,
|
|
||||||
threshold_low: float = DEFAULT_PRICE_RATING_THRESHOLD_LOW,
|
|
||||||
threshold_high: float = DEFAULT_PRICE_RATING_THRESHOLD_HIGH,
|
|
||||||
) -> list[dict]:
|
|
||||||
"""
|
|
||||||
Aggregate 15-minute intervals to hourly using rolling 5-interval window.
|
|
||||||
|
|
||||||
Preserves original field names (startsAt, total, level, rating_level) so the
|
|
||||||
aggregated data can be processed by the same code path as interval data.
|
|
||||||
|
|
||||||
Uses the same methodology as sensor rolling hour calculations:
|
|
||||||
- 5-interval window: 2 before + center + 2 after (60 minutes total)
|
|
||||||
- Center interval is at :00 of each hour
|
|
||||||
- Respects user's CONF_AVERAGE_SENSOR_DISPLAY setting (mean vs median)
|
|
||||||
|
|
||||||
Example for 10:00 data point:
|
|
||||||
- Window includes: 09:30, 09:45, 10:00, 10:15, 10:30
|
|
||||||
|
|
||||||
Args:
|
|
||||||
intervals: List of 15-minute price intervals with startsAt, total, level, rating_level
|
|
||||||
coordinator: Data update coordinator instance
|
|
||||||
threshold_low: Rating level threshold (low/normal boundary)
|
|
||||||
threshold_high: Rating level threshold (normal/high boundary)
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
List of hourly data points with same structure as input (startsAt, total, level, rating_level)
|
|
||||||
|
|
||||||
"""
|
|
||||||
if not intervals:
|
|
||||||
return []
|
|
||||||
|
|
||||||
# Get user's average display preference (mean or median)
|
|
||||||
average_display = coordinator.config_entry.options.get(CONF_AVERAGE_SENSOR_DISPLAY, DEFAULT_AVERAGE_SENSOR_DISPLAY)
|
|
||||||
use_median = average_display == "median"
|
|
||||||
|
|
||||||
hourly_data = []
|
|
||||||
|
|
||||||
# Iterate through all intervals, only process those at :00
|
|
||||||
for i, interval in enumerate(intervals):
|
|
||||||
start_time = interval.get("startsAt")
|
|
||||||
|
|
||||||
if not start_time:
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Check if this is the start of an hour (:00)
|
|
||||||
if start_time.minute != 0:
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Collect 5-interval rolling window: -2, -1, 0, +1, +2
|
|
||||||
window_prices: list[float] = []
|
|
||||||
window_intervals: list[dict] = []
|
|
||||||
|
|
||||||
for offset in range(-2, 3): # -2, -1, 0, +1, +2
|
|
||||||
target_idx = i + offset
|
|
||||||
if 0 <= target_idx < len(intervals):
|
|
||||||
target_interval = intervals[target_idx]
|
|
||||||
price = target_interval.get("total")
|
|
||||||
if price is not None:
|
|
||||||
window_prices.append(price)
|
|
||||||
window_intervals.append(target_interval)
|
|
||||||
|
|
||||||
# Calculate aggregated price based on user preference
|
|
||||||
if window_prices:
|
|
||||||
aggregated_price = calculate_median(window_prices) if use_median else calculate_mean(window_prices)
|
|
||||||
|
|
||||||
if aggregated_price is None:
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Build data point with original field names
|
|
||||||
data_point: dict[str, Any] = {
|
|
||||||
"startsAt": start_time,
|
|
||||||
"total": aggregated_price,
|
|
||||||
}
|
|
||||||
|
|
||||||
# Add aggregated level
|
|
||||||
if window_intervals:
|
|
||||||
aggregated_level = aggregate_level_data(window_intervals)
|
|
||||||
if aggregated_level:
|
|
||||||
data_point["level"] = aggregated_level.upper()
|
|
||||||
|
|
||||||
# Add aggregated rating_level
|
|
||||||
if window_intervals:
|
|
||||||
aggregated_rating = aggregate_rating_data(window_intervals, threshold_low, threshold_high)
|
|
||||||
if aggregated_rating:
|
|
||||||
data_point["rating_level"] = aggregated_rating.upper()
|
|
||||||
|
|
||||||
hourly_data.append(data_point)
|
|
||||||
|
|
||||||
return hourly_data
|
|
||||||
|
|
||||||
|
|
||||||
def aggregate_hourly_exact( # noqa: PLR0913, PLR0912, PLR0915
|
def aggregate_hourly_exact( # noqa: PLR0913, PLR0912, PLR0915
|
||||||
intervals: list[dict],
|
intervals: list[dict],
|
||||||
start_time_field: str,
|
start_time_field: str,
|
||||||
price_field: str,
|
price_field: str,
|
||||||
*,
|
*,
|
||||||
coordinator: Any,
|
coordinator: Any,
|
||||||
use_subunit_currency: bool = False,
|
use_minor_currency: bool = False,
|
||||||
round_decimals: int | None = None,
|
round_decimals: int | None = None,
|
||||||
include_level: bool = False,
|
include_level: bool = False,
|
||||||
include_rating_level: bool = False,
|
include_rating_level: bool = False,
|
||||||
|
|
@ -176,7 +79,7 @@ def aggregate_hourly_exact( # noqa: PLR0913, PLR0912, PLR0915
|
||||||
start_time_field: Custom name for start time field
|
start_time_field: Custom name for start time field
|
||||||
price_field: Custom name for price field
|
price_field: Custom name for price field
|
||||||
coordinator: Data update coordinator instance (required)
|
coordinator: Data update coordinator instance (required)
|
||||||
use_subunit_currency: Convert to subunit currency units (cents/øre)
|
use_minor_currency: Convert to minor currency units (cents/øre)
|
||||||
round_decimals: Optional decimal rounding
|
round_decimals: Optional decimal rounding
|
||||||
include_level: Include aggregated level field
|
include_level: Include aggregated level field
|
||||||
include_rating_level: Include aggregated rating_level field
|
include_rating_level: Include aggregated rating_level field
|
||||||
|
|
@ -256,8 +159,8 @@ def aggregate_hourly_exact( # noqa: PLR0913, PLR0912, PLR0915
|
||||||
if hour_intervals:
|
if hour_intervals:
|
||||||
avg_price = sum(hour_intervals) / len(hour_intervals)
|
avg_price = sum(hour_intervals) / len(hour_intervals)
|
||||||
|
|
||||||
# Convert to subunit currency (cents/øre) if requested
|
# Convert to minor currency (cents/øre) if requested
|
||||||
avg_price = round(avg_price * 100, 2) if use_subunit_currency else round(avg_price, 4)
|
avg_price = round(avg_price * 100, 2) if use_minor_currency else round(avg_price, 4)
|
||||||
|
|
||||||
# Apply custom rounding if specified
|
# Apply custom rounding if specified
|
||||||
if round_decimals is not None:
|
if round_decimals is not None:
|
||||||
|
|
@ -300,7 +203,7 @@ def get_period_data( # noqa: PLR0913, PLR0912, PLR0915
|
||||||
period_filter: str,
|
period_filter: str,
|
||||||
days: list[str],
|
days: list[str],
|
||||||
output_format: str,
|
output_format: str,
|
||||||
subunit_currency: bool,
|
minor_currency: bool,
|
||||||
round_decimals: int | None,
|
round_decimals: int | None,
|
||||||
level_filter: list[str] | None,
|
level_filter: list[str] | None,
|
||||||
rating_level_filter: list[str] | None,
|
rating_level_filter: list[str] | None,
|
||||||
|
|
@ -312,7 +215,6 @@ def get_period_data( # noqa: PLR0913, PLR0912, PLR0915
|
||||||
level_field: str,
|
level_field: str,
|
||||||
rating_level_field: str,
|
rating_level_field: str,
|
||||||
data_key: str,
|
data_key: str,
|
||||||
insert_nulls: str,
|
|
||||||
add_trailing_null: bool,
|
add_trailing_null: bool,
|
||||||
) -> dict[str, Any]:
|
) -> dict[str, Any]:
|
||||||
"""
|
"""
|
||||||
|
|
@ -321,15 +223,15 @@ def get_period_data( # noqa: PLR0913, PLR0912, PLR0915
|
||||||
When period_filter is specified, returns the precomputed period summaries
|
When period_filter is specified, returns the precomputed period summaries
|
||||||
from the coordinator instead of filtering intervals.
|
from the coordinator instead of filtering intervals.
|
||||||
|
|
||||||
Note: Period prices (price_median) are stored in base currency units (€/kr/$/£).
|
Note: Period prices (price_avg) are stored in minor currency units (ct/øre).
|
||||||
They are converted to subunit currency units (ct/øre/¢/p) if subunit_currency=True.
|
They are converted to major currency unless minor_currency=True.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
coordinator: Data coordinator with period summaries
|
coordinator: Data coordinator with period summaries
|
||||||
period_filter: "best_price" or "peak_price"
|
period_filter: "best_price" or "peak_price"
|
||||||
days: List of days to include
|
days: List of days to include
|
||||||
output_format: "array_of_objects" or "array_of_arrays"
|
output_format: "array_of_objects" or "array_of_arrays"
|
||||||
subunit_currency: If False, convert prices from minor to major units
|
minor_currency: If False, convert prices from minor to major units
|
||||||
round_decimals: Optional decimal rounding
|
round_decimals: Optional decimal rounding
|
||||||
level_filter: Optional level filter
|
level_filter: Optional level filter
|
||||||
rating_level_filter: Optional rating level filter
|
rating_level_filter: Optional rating level filter
|
||||||
|
|
@ -341,7 +243,6 @@ def get_period_data( # noqa: PLR0913, PLR0912, PLR0915
|
||||||
level_field: Custom name for level field
|
level_field: Custom name for level field
|
||||||
rating_level_field: Custom name for rating_level field
|
rating_level_field: Custom name for rating_level field
|
||||||
data_key: Top-level key name in response
|
data_key: Top-level key name in response
|
||||||
insert_nulls: NULL insertion mode ('none', 'segments', 'all')
|
|
||||||
add_trailing_null: Whether to add trailing null point
|
add_trailing_null: Whether to add trailing null point
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
|
|
@ -370,44 +271,11 @@ def get_period_data( # noqa: PLR0913, PLR0912, PLR0915
|
||||||
day_intervals = get_intervals_for_day_offsets(coordinator.data, offsets)
|
day_intervals = get_intervals_for_day_offsets(coordinator.data, offsets)
|
||||||
allowed_dates = {interval["startsAt"].date() for interval in day_intervals if interval.get("startsAt")}
|
allowed_dates = {interval["startsAt"].date() for interval in day_intervals if interval.get("startsAt")}
|
||||||
|
|
||||||
# Calculate day boundaries for trimming
|
# Filter periods to those within allowed dates
|
||||||
# Find min/max dates to determine the overall requested window
|
for period in period_summaries:
|
||||||
if allowed_dates:
|
start = period.get("start")
|
||||||
min_date = min(allowed_dates)
|
if start and start.date() in allowed_dates:
|
||||||
max_date = max(allowed_dates)
|
filtered_periods.append(period)
|
||||||
|
|
||||||
# CRITICAL: Trim periods that span day boundaries
|
|
||||||
# Window start = midnight of first requested day
|
|
||||||
# Window end = midnight of day AFTER last requested day (exclusive boundary)
|
|
||||||
window_start = datetime.combine(min_date, time.min)
|
|
||||||
window_end = datetime.combine(max_date, time.max).replace(microsecond=999999)
|
|
||||||
|
|
||||||
# Make timezone-aware using coordinator's time service
|
|
||||||
window_start = coordinator.time.as_local(window_start)
|
|
||||||
window_end = coordinator.time.as_local(window_end)
|
|
||||||
|
|
||||||
# Filter and trim periods to window
|
|
||||||
for period in period_summaries:
|
|
||||||
start = period.get("start")
|
|
||||||
end = period.get("end")
|
|
||||||
|
|
||||||
if not start:
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Skip periods that end before window or start after window
|
|
||||||
if end and end <= window_start:
|
|
||||||
continue
|
|
||||||
if start >= window_end:
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Trim period to window boundaries
|
|
||||||
trimmed_period = period.copy()
|
|
||||||
if start < window_start:
|
|
||||||
trimmed_period["start"] = window_start
|
|
||||||
if end and end > window_end:
|
|
||||||
trimmed_period["end"] = window_end
|
|
||||||
|
|
||||||
filtered_periods.append(trimmed_period)
|
|
||||||
else:
|
else:
|
||||||
filtered_periods = period_summaries
|
filtered_periods = period_summaries
|
||||||
|
|
||||||
|
|
@ -428,7 +296,7 @@ def get_period_data( # noqa: PLR0913, PLR0912, PLR0915
|
||||||
# Build data point based on output format
|
# Build data point based on output format
|
||||||
if output_format == "array_of_objects":
|
if output_format == "array_of_objects":
|
||||||
# Map period fields to custom field names
|
# Map period fields to custom field names
|
||||||
# Period has: start, end, level, rating_level, price_mean, price_median, price_min, price_max
|
# Period has: start, end, level, rating_level, price_avg, price_min, price_max
|
||||||
data_point = {}
|
data_point = {}
|
||||||
|
|
||||||
# Start time
|
# Start time
|
||||||
|
|
@ -439,17 +307,14 @@ def get_period_data( # noqa: PLR0913, PLR0912, PLR0915
|
||||||
end = period.get("end")
|
end = period.get("end")
|
||||||
data_point[end_time_field] = end.isoformat() if end and hasattr(end, "isoformat") else end
|
data_point[end_time_field] = end.isoformat() if end and hasattr(end, "isoformat") else end
|
||||||
|
|
||||||
# Price (use price_median from period for visual consistency with sensor states)
|
# Price (use price_avg from period, stored in minor units)
|
||||||
# Median is more representative than mean for periods with gap tolerance
|
price_avg = period.get("price_avg", 0.0)
|
||||||
# (single "normal" intervals between cheap/expensive ones don't skew the display)
|
# Convert to major currency unless minor_currency=True
|
||||||
price_median = period.get("price_median", 0.0)
|
if not minor_currency:
|
||||||
# Convert to subunit currency if subunit_currency=True (periods stored in base currency)
|
price_avg = price_avg / 100
|
||||||
if subunit_currency:
|
if round_decimals is not None:
|
||||||
price_median = price_median * 100
|
price_avg = round(price_avg, round_decimals)
|
||||||
# Apply rounding: use round_decimals if provided, otherwise default precision
|
data_point[price_field] = price_avg
|
||||||
precision = round_decimals if round_decimals is not None else (2 if subunit_currency else 4)
|
|
||||||
price_median = round(price_median, precision)
|
|
||||||
data_point[price_field] = price_median
|
|
||||||
|
|
||||||
# Level (only if requested and present)
|
# Level (only if requested and present)
|
||||||
if include_level and "level" in period:
|
if include_level and "level" in period:
|
||||||
|
|
@ -462,38 +327,18 @@ def get_period_data( # noqa: PLR0913, PLR0912, PLR0915
|
||||||
chart_data.append(data_point)
|
chart_data.append(data_point)
|
||||||
|
|
||||||
else: # array_of_arrays
|
else: # array_of_arrays
|
||||||
# For array_of_arrays, include 2-3 points per period depending on insert_nulls:
|
# For array_of_arrays, include: [start, price_avg]
|
||||||
# Always:
|
price_avg = period.get("price_avg", 0.0)
|
||||||
# 1. Start time with price (begin period)
|
# Convert to major currency unless minor_currency=True
|
||||||
# 2. End time with price (hold price until end)
|
if not minor_currency:
|
||||||
# If insert_nulls='segments' or 'all':
|
price_avg = price_avg / 100
|
||||||
# 3. End time with NULL (cleanly terminate segment for ApexCharts)
|
if round_decimals is not None:
|
||||||
# Use price_median for consistency with sensor states (more representative for periods)
|
price_avg = round(price_avg, round_decimals)
|
||||||
price_median = period.get("price_median", 0.0)
|
|
||||||
# Convert to subunit currency if subunit_currency=True (periods stored in base currency)
|
|
||||||
if subunit_currency:
|
|
||||||
price_median = price_median * 100
|
|
||||||
# Apply rounding: use round_decimals if provided, otherwise default precision
|
|
||||||
precision = round_decimals if round_decimals is not None else (2 if subunit_currency else 4)
|
|
||||||
price_median = round(price_median, precision)
|
|
||||||
start = period["start"]
|
start = period["start"]
|
||||||
end = period.get("end")
|
|
||||||
start_serialized = start.isoformat() if hasattr(start, "isoformat") else start
|
start_serialized = start.isoformat() if hasattr(start, "isoformat") else start
|
||||||
end_serialized = end.isoformat() if end and hasattr(end, "isoformat") else end
|
chart_data.append([start_serialized, price_avg])
|
||||||
|
|
||||||
# Add data points per period
|
# Add trailing null point if requested
|
||||||
chart_data.append([start_serialized, price_median]) # 1. Start with price
|
|
||||||
if end_serialized:
|
|
||||||
chart_data.append([end_serialized, price_median]) # 2. End with price (hold level)
|
|
||||||
# 3. Add NULL terminator only if insert_nulls is enabled
|
|
||||||
if insert_nulls in ("segments", "all"):
|
|
||||||
chart_data.append([end_serialized, None]) # 3. End with NULL (terminate segment)
|
|
||||||
|
|
||||||
# Add trailing null point if requested (independent of insert_nulls)
|
|
||||||
# This adds an additional NULL at the end of the entire data series.
|
|
||||||
# If both insert_nulls and add_trailing_null are enabled, you get:
|
|
||||||
# - NULL terminator after each period (from insert_nulls)
|
|
||||||
# - Additional NULL at the very end (from add_trailing_null)
|
|
||||||
if add_trailing_null and chart_data:
|
if add_trailing_null and chart_data:
|
||||||
if output_format == "array_of_objects":
|
if output_format == "array_of_objects":
|
||||||
null_point = {start_time_field: None, end_time_field: None}
|
null_point = {start_time_field: None, end_time_field: None}
|
||||||
|
|
|
||||||
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
|
|
@ -145,14 +145,12 @@ async def handle_get_price(call: ServiceCall) -> ServiceResponse:
|
||||||
|
|
||||||
# Call the interval pool to get intervals (with intelligent caching)
|
# Call the interval pool to get intervals (with intelligent caching)
|
||||||
# Single-home architecture: pool knows its home_id, no parameter needed
|
# Single-home architecture: pool knows its home_id, no parameter needed
|
||||||
price_info, _api_called = await pool.get_intervals(
|
price_info = await pool.get_intervals(
|
||||||
api_client=api_client,
|
api_client=api_client,
|
||||||
user_data=user_data,
|
user_data=user_data,
|
||||||
start_time=start_time,
|
start_time=start_time,
|
||||||
end_time=end_time,
|
end_time=end_time,
|
||||||
)
|
)
|
||||||
# Note: We ignore api_called flag here - service always returns requested data
|
|
||||||
# regardless of whether it came from cache or was fetched fresh from API
|
|
||||||
|
|
||||||
except Exception as error:
|
except Exception as error:
|
||||||
_LOGGER.exception("Error fetching price data")
|
_LOGGER.exception("Error fetching price data")
|
||||||
|
|
|
||||||
|
|
@ -1,38 +0,0 @@
|
||||||
"""
|
|
||||||
Switch platform for Tibber Prices integration.
|
|
||||||
|
|
||||||
Provides configurable switch entities for runtime overrides of Best Price
|
|
||||||
and Peak Price period calculation boolean settings (enable_min_periods).
|
|
||||||
|
|
||||||
When enabled, these entities take precedence over the options flow settings.
|
|
||||||
When disabled (default), the options flow settings are used.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
from typing import TYPE_CHECKING
|
|
||||||
|
|
||||||
from .core import TibberPricesConfigSwitch
|
|
||||||
from .definitions import SWITCH_ENTITY_DESCRIPTIONS
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
from custom_components.tibber_prices.data import TibberPricesConfigEntry
|
|
||||||
from homeassistant.core import HomeAssistant
|
|
||||||
from homeassistant.helpers.entity_platform import AddEntitiesCallback
|
|
||||||
|
|
||||||
|
|
||||||
async def async_setup_entry(
|
|
||||||
_hass: HomeAssistant,
|
|
||||||
entry: TibberPricesConfigEntry,
|
|
||||||
async_add_entities: AddEntitiesCallback,
|
|
||||||
) -> None:
|
|
||||||
"""Set up Tibber Prices switch entities based on a config entry."""
|
|
||||||
coordinator = entry.runtime_data.coordinator
|
|
||||||
|
|
||||||
async_add_entities(
|
|
||||||
TibberPricesConfigSwitch(
|
|
||||||
coordinator=coordinator,
|
|
||||||
entity_description=entity_description,
|
|
||||||
)
|
|
||||||
for entity_description in SWITCH_ENTITY_DESCRIPTIONS
|
|
||||||
)
|
|
||||||
|
|
@ -1,245 +0,0 @@
|
||||||
"""
|
|
||||||
Switch entity implementation for Tibber Prices configuration overrides.
|
|
||||||
|
|
||||||
These entities allow runtime configuration of boolean period calculation settings.
|
|
||||||
When a config entity is enabled, its value takes precedence over the
|
|
||||||
options flow setting for period calculations.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
import logging
|
|
||||||
from typing import TYPE_CHECKING, Any
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.const import (
|
|
||||||
DOMAIN,
|
|
||||||
get_home_type_translation,
|
|
||||||
get_translation,
|
|
||||||
)
|
|
||||||
from homeassistant.components.switch import SwitchEntity
|
|
||||||
from homeassistant.core import callback
|
|
||||||
from homeassistant.helpers.device_registry import DeviceEntryType, DeviceInfo
|
|
||||||
from homeassistant.helpers.restore_state import RestoreEntity
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
from custom_components.tibber_prices.coordinator import (
|
|
||||||
TibberPricesDataUpdateCoordinator,
|
|
||||||
)
|
|
||||||
|
|
||||||
from .definitions import TibberPricesSwitchEntityDescription
|
|
||||||
|
|
||||||
_LOGGER = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class TibberPricesConfigSwitch(RestoreEntity, SwitchEntity):
|
|
||||||
"""
|
|
||||||
A switch entity for configuring boolean period calculation settings at runtime.
|
|
||||||
|
|
||||||
When this entity is enabled, its value overrides the corresponding
|
|
||||||
options flow setting. When disabled (default), the options flow
|
|
||||||
setting is used for period calculations.
|
|
||||||
|
|
||||||
The entity restores its value after Home Assistant restart.
|
|
||||||
"""
|
|
||||||
|
|
||||||
_attr_has_entity_name = True
|
|
||||||
entity_description: TibberPricesSwitchEntityDescription
|
|
||||||
|
|
||||||
# Exclude all attributes from recorder history - config entities don't need history
|
|
||||||
_unrecorded_attributes = frozenset(
|
|
||||||
{
|
|
||||||
"description",
|
|
||||||
"long_description",
|
|
||||||
"usage_tips",
|
|
||||||
"friendly_name",
|
|
||||||
"icon",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
def __init__(
|
|
||||||
self,
|
|
||||||
coordinator: TibberPricesDataUpdateCoordinator,
|
|
||||||
entity_description: TibberPricesSwitchEntityDescription,
|
|
||||||
) -> None:
|
|
||||||
"""Initialize the config switch entity."""
|
|
||||||
self.coordinator = coordinator
|
|
||||||
self.entity_description = entity_description
|
|
||||||
|
|
||||||
# Set unique ID
|
|
||||||
self._attr_unique_id = (
|
|
||||||
f"{coordinator.config_entry.unique_id or coordinator.config_entry.entry_id}_{entity_description.key}"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Initialize with None - will be set in async_added_to_hass
|
|
||||||
self._attr_is_on: bool | None = None
|
|
||||||
|
|
||||||
# Setup device info
|
|
||||||
self._setup_device_info()
|
|
||||||
|
|
||||||
def _setup_device_info(self) -> None:
|
|
||||||
"""Set up device information."""
|
|
||||||
home_name, home_id, home_type = self._get_device_info()
|
|
||||||
language = self.coordinator.hass.config.language or "en"
|
|
||||||
translated_model = get_home_type_translation(home_type, language) if home_type else "Unknown"
|
|
||||||
|
|
||||||
self._attr_device_info = DeviceInfo(
|
|
||||||
entry_type=DeviceEntryType.SERVICE,
|
|
||||||
identifiers={
|
|
||||||
(
|
|
||||||
DOMAIN,
|
|
||||||
self.coordinator.config_entry.unique_id or self.coordinator.config_entry.entry_id,
|
|
||||||
)
|
|
||||||
},
|
|
||||||
name=home_name,
|
|
||||||
manufacturer="Tibber",
|
|
||||||
model=translated_model,
|
|
||||||
serial_number=home_id if home_id else None,
|
|
||||||
configuration_url="https://developer.tibber.com/explorer",
|
|
||||||
)
|
|
||||||
|
|
||||||
def _get_device_info(self) -> tuple[str, str | None, str | None]:
|
|
||||||
"""Get device name, ID and type."""
|
|
||||||
user_profile = self.coordinator.get_user_profile()
|
|
||||||
is_subentry = bool(self.coordinator.config_entry.data.get("home_id"))
|
|
||||||
home_id = self.coordinator.config_entry.unique_id
|
|
||||||
home_type = None
|
|
||||||
|
|
||||||
if is_subentry:
|
|
||||||
home_data = self.coordinator.config_entry.data.get("home_data", {})
|
|
||||||
home_id = self.coordinator.config_entry.data.get("home_id")
|
|
||||||
address = home_data.get("address", {})
|
|
||||||
address1 = address.get("address1", "")
|
|
||||||
city = address.get("city", "")
|
|
||||||
app_nickname = home_data.get("appNickname", "")
|
|
||||||
home_type = home_data.get("type", "")
|
|
||||||
|
|
||||||
if app_nickname and app_nickname.strip():
|
|
||||||
home_name = app_nickname.strip()
|
|
||||||
elif address1:
|
|
||||||
home_name = address1
|
|
||||||
if city:
|
|
||||||
home_name = f"{home_name}, {city}"
|
|
||||||
else:
|
|
||||||
home_name = f"Tibber Home {home_id[:8]}" if home_id else "Tibber Home"
|
|
||||||
elif user_profile:
|
|
||||||
home_name = user_profile.get("name") or "Tibber Home"
|
|
||||||
else:
|
|
||||||
home_name = "Tibber Home"
|
|
||||||
|
|
||||||
return home_name, home_id, home_type
|
|
||||||
|
|
||||||
async def async_added_to_hass(self) -> None:
|
|
||||||
"""Handle entity which was added to Home Assistant."""
|
|
||||||
await super().async_added_to_hass()
|
|
||||||
|
|
||||||
# Try to restore previous state
|
|
||||||
last_state = await self.async_get_last_state()
|
|
||||||
if last_state is not None and last_state.state in ("on", "off"):
|
|
||||||
self._attr_is_on = last_state.state == "on"
|
|
||||||
_LOGGER.debug(
|
|
||||||
"Restored %s value: %s",
|
|
||||||
self.entity_description.key,
|
|
||||||
self._attr_is_on,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
# Initialize with value from options flow (or default)
|
|
||||||
self._attr_is_on = self._get_value_from_options()
|
|
||||||
_LOGGER.debug(
|
|
||||||
"Initialized %s from options: %s",
|
|
||||||
self.entity_description.key,
|
|
||||||
self._attr_is_on,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Register override with coordinator if entity is enabled
|
|
||||||
await self._sync_override_state()
|
|
||||||
|
|
||||||
async def async_will_remove_from_hass(self) -> None:
|
|
||||||
"""Handle entity removal from Home Assistant."""
|
|
||||||
# Remove override when entity is removed
|
|
||||||
self.coordinator.remove_config_override(
|
|
||||||
self.entity_description.config_key,
|
|
||||||
self.entity_description.config_section,
|
|
||||||
)
|
|
||||||
await super().async_will_remove_from_hass()
|
|
||||||
|
|
||||||
def _get_value_from_options(self) -> bool:
|
|
||||||
"""Get the current value from options flow or default."""
|
|
||||||
options = self.coordinator.config_entry.options
|
|
||||||
section = options.get(self.entity_description.config_section, {})
|
|
||||||
value = section.get(
|
|
||||||
self.entity_description.config_key,
|
|
||||||
self.entity_description.default_value,
|
|
||||||
)
|
|
||||||
return bool(value)
|
|
||||||
|
|
||||||
async def _sync_override_state(self) -> None:
|
|
||||||
"""Sync the override state with the coordinator based on entity enabled state."""
|
|
||||||
# Check if entity is enabled in registry
|
|
||||||
if self.registry_entry is not None and not self.registry_entry.disabled:
|
|
||||||
# Entity is enabled - register the override
|
|
||||||
if self._attr_is_on is not None:
|
|
||||||
self.coordinator.set_config_override(
|
|
||||||
self.entity_description.config_key,
|
|
||||||
self.entity_description.config_section,
|
|
||||||
self._attr_is_on,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
# Entity is disabled - remove override
|
|
||||||
self.coordinator.remove_config_override(
|
|
||||||
self.entity_description.config_key,
|
|
||||||
self.entity_description.config_section,
|
|
||||||
)
|
|
||||||
|
|
||||||
async def async_turn_on(self, **_kwargs: Any) -> None:
|
|
||||||
"""Turn the switch on."""
|
|
||||||
await self._set_value(is_on=True)
|
|
||||||
|
|
||||||
async def async_turn_off(self, **_kwargs: Any) -> None:
|
|
||||||
"""Turn the switch off."""
|
|
||||||
await self._set_value(is_on=False)
|
|
||||||
|
|
||||||
async def _set_value(self, *, is_on: bool) -> None:
|
|
||||||
"""Update the current value and trigger recalculation."""
|
|
||||||
self._attr_is_on = is_on
|
|
||||||
|
|
||||||
# Update the coordinator's runtime override
|
|
||||||
self.coordinator.set_config_override(
|
|
||||||
self.entity_description.config_key,
|
|
||||||
self.entity_description.config_section,
|
|
||||||
is_on,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Trigger period recalculation (same path as options update)
|
|
||||||
await self.coordinator.async_handle_config_override_update()
|
|
||||||
|
|
||||||
_LOGGER.debug(
|
|
||||||
"Updated %s to %s, triggered period recalculation",
|
|
||||||
self.entity_description.key,
|
|
||||||
is_on,
|
|
||||||
)
|
|
||||||
|
|
||||||
@property
|
|
||||||
def extra_state_attributes(self) -> dict[str, Any] | None:
|
|
||||||
"""Return entity state attributes with description."""
|
|
||||||
language = self.coordinator.hass.config.language or "en"
|
|
||||||
|
|
||||||
# Try to get description from custom translations
|
|
||||||
# Custom translations use direct path: switch.{key}.description
|
|
||||||
translation_path = [
|
|
||||||
"switch",
|
|
||||||
self.entity_description.translation_key or self.entity_description.key,
|
|
||||||
"description",
|
|
||||||
]
|
|
||||||
description = get_translation(translation_path, language)
|
|
||||||
|
|
||||||
attrs: dict[str, Any] = {}
|
|
||||||
if description:
|
|
||||||
attrs["description"] = description
|
|
||||||
|
|
||||||
return attrs if attrs else None
|
|
||||||
|
|
||||||
@callback
|
|
||||||
def async_registry_entry_updated(self) -> None:
|
|
||||||
"""Handle entity registry update (enabled/disabled state change)."""
|
|
||||||
# This is called when the entity is enabled/disabled in the UI
|
|
||||||
self.hass.async_create_task(self._sync_override_state())
|
|
||||||
|
|
@ -1,84 +0,0 @@
|
||||||
"""
|
|
||||||
Switch entity definitions for Tibber Prices configuration overrides.
|
|
||||||
|
|
||||||
These switch entities allow runtime configuration of boolean settings
|
|
||||||
for Best Price and Peak Price period calculations.
|
|
||||||
|
|
||||||
When enabled, the entity value takes precedence over the options flow setting.
|
|
||||||
When disabled (default), the options flow setting is used.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
from dataclasses import dataclass
|
|
||||||
|
|
||||||
from homeassistant.components.switch import SwitchEntityDescription
|
|
||||||
from homeassistant.const import EntityCategory
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass(frozen=True, kw_only=True)
|
|
||||||
class TibberPricesSwitchEntityDescription(SwitchEntityDescription):
|
|
||||||
"""Describes a Tibber Prices switch entity for config overrides."""
|
|
||||||
|
|
||||||
# The config key this entity overrides (matches CONF_* constants)
|
|
||||||
config_key: str
|
|
||||||
# The section in options where this setting is stored
|
|
||||||
config_section: str
|
|
||||||
# Whether this is for best_price (False) or peak_price (True)
|
|
||||||
is_peak_price: bool = False
|
|
||||||
# Default value from const.py
|
|
||||||
default_value: bool = True
|
|
||||||
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# BEST PRICE PERIOD CONFIGURATION OVERRIDES (Boolean)
|
|
||||||
# ============================================================================
|
|
||||||
|
|
||||||
BEST_PRICE_SWITCH_ENTITIES = (
|
|
||||||
SwitchEntityDescription(
|
|
||||||
key="best_price_enable_relaxation_override",
|
|
||||||
translation_key="best_price_enable_relaxation_override",
|
|
||||||
name="Best Price: Achieve Minimum Count",
|
|
||||||
icon="mdi:arrow-down-bold-circle",
|
|
||||||
entity_category=EntityCategory.CONFIG,
|
|
||||||
entity_registry_enabled_default=False,
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
# Custom descriptions with extra fields
|
|
||||||
BEST_PRICE_SWITCH_ENTITY_DESCRIPTIONS = (
|
|
||||||
TibberPricesSwitchEntityDescription(
|
|
||||||
key="best_price_enable_relaxation_override",
|
|
||||||
translation_key="best_price_enable_relaxation_override",
|
|
||||||
name="Best Price: Achieve Minimum Count",
|
|
||||||
icon="mdi:arrow-down-bold-circle",
|
|
||||||
entity_category=EntityCategory.CONFIG,
|
|
||||||
entity_registry_enabled_default=False,
|
|
||||||
config_key="enable_min_periods_best",
|
|
||||||
config_section="relaxation_and_target_periods",
|
|
||||||
is_peak_price=False,
|
|
||||||
default_value=True, # DEFAULT_ENABLE_MIN_PERIODS_BEST
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# PEAK PRICE PERIOD CONFIGURATION OVERRIDES (Boolean)
|
|
||||||
# ============================================================================
|
|
||||||
|
|
||||||
PEAK_PRICE_SWITCH_ENTITY_DESCRIPTIONS = (
|
|
||||||
TibberPricesSwitchEntityDescription(
|
|
||||||
key="peak_price_enable_relaxation_override",
|
|
||||||
translation_key="peak_price_enable_relaxation_override",
|
|
||||||
name="Peak Price: Achieve Minimum Count",
|
|
||||||
icon="mdi:arrow-up-bold-circle",
|
|
||||||
entity_category=EntityCategory.CONFIG,
|
|
||||||
entity_registry_enabled_default=False,
|
|
||||||
config_key="enable_min_periods_peak",
|
|
||||||
config_section="relaxation_and_target_periods",
|
|
||||||
is_peak_price=True,
|
|
||||||
default_value=True, # DEFAULT_ENABLE_MIN_PERIODS_PEAK
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
# All switch entity descriptions combined
|
|
||||||
SWITCH_ENTITY_DESCRIPTIONS = BEST_PRICE_SWITCH_ENTITY_DESCRIPTIONS + PEAK_PRICE_SWITCH_ENTITY_DESCRIPTIONS
|
|
||||||
|
|
@ -11,14 +11,14 @@
|
||||||
},
|
},
|
||||||
"new_token": {
|
"new_token": {
|
||||||
"title": "API-Token eingeben",
|
"title": "API-Token eingeben",
|
||||||
"description": "Richte Tibber Preisinformationen & Bewertungen ein.\n\nUm einen API-Zugriffstoken zu generieren, besuche [{tibber_url}]({tibber_url}).",
|
"description": "Richte Tibber Preisinformationen & Bewertungen ein.\n\nUm einen API-Zugriffstoken zu generieren, besuche https://developer.tibber.com.",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API-Zugriffstoken"
|
"access_token": "API-Zugriffstoken"
|
||||||
},
|
},
|
||||||
"submit": "Token validieren"
|
"submit": "Token validieren"
|
||||||
},
|
},
|
||||||
"user": {
|
"user": {
|
||||||
"description": "Richte Tibber Preisinformationen & Bewertungen ein.\n\nUm einen API-Zugriffstoken zu generieren, besuche [{tibber_url}]({tibber_url}).",
|
"description": "Richte Tibber Preisinformationen & Bewertungen ein.\n\nUm einen API-Zugriffstoken zu generieren, besuche https://developer.tibber.com.",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API-Zugriffstoken"
|
"access_token": "API-Zugriffstoken"
|
||||||
},
|
},
|
||||||
|
|
@ -42,7 +42,7 @@
|
||||||
},
|
},
|
||||||
"reauth_confirm": {
|
"reauth_confirm": {
|
||||||
"title": "Tibber Preis-Integration erneut authentifizieren",
|
"title": "Tibber Preis-Integration erneut authentifizieren",
|
||||||
"description": "Der Zugriffstoken für Tibber ist nicht mehr gültig. Bitte gib einen neuen API-Zugriffstoken ein, um diese Integration weiter zu nutzen.\n\nUm einen neuen API-Zugriffstoken zu generieren, besuche [{tibber_url}]({tibber_url}).",
|
"description": "Der Zugriffstoken für Tibber ist nicht mehr gültig. Bitte gib einen neuen API-Zugriffstoken ein, um diese Integration weiter zu nutzen.\n\nUm einen neuen API-Zugriffstoken zu generieren, besuche https://developer.tibber.com.",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API-Zugriffstoken"
|
"access_token": "API-Zugriffstoken"
|
||||||
},
|
},
|
||||||
|
|
@ -77,23 +77,7 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"common": {
|
"common": {
|
||||||
"step_progress": "{step_num} / {total_steps}",
|
"step_progress": "{step_num} / {total_steps}"
|
||||||
"override_warning_template": "⚠️ {fields} wird durch Konfigurations-Entität gesteuert",
|
|
||||||
"override_warning_and": "und",
|
|
||||||
"override_field_label_best_price_min_period_length": "Mindestperiodenlänge",
|
|
||||||
"override_field_label_best_price_max_level_gap_count": "Lückentoleranz",
|
|
||||||
"override_field_label_best_price_flex": "Flexibilität",
|
|
||||||
"override_field_label_best_price_min_distance_from_avg": "Mindestabstand",
|
|
||||||
"override_field_label_enable_min_periods_best": "Mindestzahl erreichen",
|
|
||||||
"override_field_label_min_periods_best": "Mindestperioden",
|
|
||||||
"override_field_label_relaxation_attempts_best": "Lockerungsversuche",
|
|
||||||
"override_field_label_peak_price_min_period_length": "Mindestperiodenlänge",
|
|
||||||
"override_field_label_peak_price_max_level_gap_count": "Lückentoleranz",
|
|
||||||
"override_field_label_peak_price_flex": "Flexibilität",
|
|
||||||
"override_field_label_peak_price_min_distance_from_avg": "Mindestabstand",
|
|
||||||
"override_field_label_enable_min_periods_peak": "Mindestzahl erreichen",
|
|
||||||
"override_field_label_min_periods_peak": "Mindestperioden",
|
|
||||||
"override_field_label_relaxation_attempts_peak": "Lockerungsversuche"
|
|
||||||
},
|
},
|
||||||
"config_subentries": {
|
"config_subentries": {
|
||||||
"home": {
|
"home": {
|
||||||
|
|
@ -148,82 +132,56 @@
|
||||||
"options": {
|
"options": {
|
||||||
"step": {
|
"step": {
|
||||||
"init": {
|
"init": {
|
||||||
"menu_options": {
|
|
||||||
"general_settings": "⚙️ Allgemeine Einstellungen",
|
|
||||||
"display_settings": "💱 Währungsanzeige",
|
|
||||||
"current_interval_price_rating": "📊 Preisbewertung",
|
|
||||||
"price_level": "🏷️ Preisniveau",
|
|
||||||
"volatility": "💨 Preis-Volatilität",
|
|
||||||
"best_price": "💚 Bestpreis",
|
|
||||||
"peak_price": "🔴 Spitzenpreis",
|
|
||||||
"price_trend": "📈 Preistrend",
|
|
||||||
"chart_data_export": "📊 Diagrammdaten-Export",
|
|
||||||
"reset_to_defaults": "🔄 Auf Werkseinstellungen zurücksetzen",
|
|
||||||
"finish": "⬅️ Zurück"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"general_settings": {
|
|
||||||
"title": "⚙️ Allgemeine Einstellungen",
|
"title": "⚙️ Allgemeine Einstellungen",
|
||||||
"description": "**Konfiguriere allgemeine Einstellungen für Tibber-Preisinformationen und -bewertungen.**\n\n---\n\n**Benutzer:** {user_login}",
|
"description": "_{step_progress}_\n\n**Konfiguriere allgemeine Einstellungen für Tibber-Preisinformationen und -bewertungen.**\n\n---\n\n**Benutzer:** {user_login}",
|
||||||
"data": {
|
"data": {
|
||||||
"extended_descriptions": "Erweiterte Beschreibungen",
|
"extended_descriptions": "Erweiterte Beschreibungen"
|
||||||
"average_sensor_display": "Durchschnittsensor-Anzeige"
|
|
||||||
},
|
},
|
||||||
"data_description": {
|
"data_description": {
|
||||||
"extended_descriptions": "Steuert, ob Entitätsattribute ausführliche Erklärungen und Nutzungstipps enthalten.\n\n• Deaktiviert (Standard): Nur kurze Beschreibung\n• Aktiviert: Ausführliche Erklärung + praktische Nutzungsbeispiele\n\nBeispiel:\nDeaktiviert = 1 Attribut\nAktiviert = 2 zusätzliche Attribute",
|
"extended_descriptions": "Steuert, ob Entitätsattribute ausführliche Erklärungen und Nutzungstipps enthalten.\n\n• Deaktiviert (Standard): Nur kurze Beschreibung\n• Aktiviert: Ausführliche Erklärung + praktische Nutzungsbeispiele\n\nBeispiel:\nDeaktiviert = 1 Attribut\nAktiviert = 2 zusätzliche Attribute"
|
||||||
"average_sensor_display": "Wähle aus, welcher statistische Wert im Sensorstatus für Durchschnitts-Preissensoren angezeigt wird. Der andere Wert wird als Attribut angezeigt.\n\n• **Median (Standard)**: Zeigt den 'typischen' Preis, resistent gegen Extremwerte - ideal für Anzeige und menschliche Interpretation\n• **Arithmetisches Mittel**: Zeigt den echten mathematischen Durchschnitt inkl. aller Preise - ideal für exakte Kostenberechnungen\n\nFür Automatisierungen nutze das Attribut `price_mean` oder `price_median`, um unabhängig von dieser Einstellung auf beide Werte zuzugreifen."
|
|
||||||
},
|
},
|
||||||
"submit": "↩ Speichern & Zurück"
|
"submit": "Weiter →"
|
||||||
},
|
|
||||||
"display_settings": {
|
|
||||||
"title": "💱 Währungsanzeige-Einstellungen",
|
|
||||||
"description": "**Konfiguriere, wie Strompreise angezeigt werden - in Basiswährung (€, kr) oder Unterwährungseinheit (ct, øre).**\n\n---",
|
|
||||||
"data": {
|
|
||||||
"currency_display_mode": "Anzeigemodus"
|
|
||||||
},
|
|
||||||
"data_description": {
|
|
||||||
"currency_display_mode": "Wähle, wie Preise angezeigt werden:\n\n• **Basiswährung** (€/kWh, kr/kWh): Dezimalwerte (z.B. 0,25 €/kWh) - Unterschiede sichtbar ab 3.-4. Nachkommastelle\n• **Unterwährungseinheit** (ct/kWh, øre/kWh): Größere Werte (z.B. 25,00 ct/kWh) - Unterschiede bereits ab 1. Nachkommastelle sichtbar\n\nStandard abhängig von deiner Währung:\n• EUR → Unterwährungseinheit (Cent) - deutsche/niederländische Präferenz\n• NOK/SEK/DKK → Basiswährung (Kronen) - skandinavische Präferenz\n• USD/GBP → Basiswährung\n\n**💡 Tipp:** Bei Auswahl von Unterwährungseinheit kannst du den zusätzlichen Sensor \"Aktueller Strompreis (Energie-Dashboard)\" aktivieren (standardmäßig deaktiviert)."
|
|
||||||
},
|
|
||||||
"submit": "↩ Speichern & Zurück"
|
|
||||||
},
|
},
|
||||||
"current_interval_price_rating": {
|
"current_interval_price_rating": {
|
||||||
"title": "📊 Preisbewertungs-Einstellungen",
|
"title": "📊 Preisbewertungs-Schwellenwerte",
|
||||||
"description": "**Konfiguriere Schwellenwerte und Stabilisierung für Preisbewertungsstufen (niedrig/normal/hoch) basierend auf dem Vergleich mit dem nachlaufenden 24-Stunden-Durchschnitt.**{entity_warning}",
|
"description": "_{step_progress}_\n\n**Konfiguriere Schwellenwerte für Preisbewertungsstufen (niedrig/normal/hoch) basierend auf dem Vergleich mit dem nachlaufenden 24-Stunden-Durchschnitt.**\n\n---",
|
||||||
"data": {
|
"sections": {
|
||||||
"price_rating_threshold_low": "Niedrig-Schwelle",
|
"price_rating_thresholds": {
|
||||||
"price_rating_threshold_high": "Hoch-Schwelle",
|
"name": "Preisbewertungs-Schwellenwerte",
|
||||||
"price_rating_hysteresis": "Hysterese",
|
"description": "Definiere die Einstufungen für die Preisbewertung.",
|
||||||
"price_rating_gap_tolerance": "Lücken-Toleranz"
|
"data": {
|
||||||
|
"price_rating_threshold_low": "Niedrig-Schwelle",
|
||||||
|
"price_rating_threshold_high": "Hoch-Schwelle"
|
||||||
|
},
|
||||||
|
"data_description": {
|
||||||
|
"price_rating_threshold_low": "Prozentwert, um wie viel der aktuelle Preis unter dem nachlaufenden 24-Stunden-Durchschnitt liegen muss, damit er als 'niedrig' bewertet wird. Beispiel: 5 bedeutet mindestens 5% unter Durchschnitt. Sensoren mit dieser Bewertung zeigen günstige Zeitfenster an. Standard: 5%",
|
||||||
|
"price_rating_threshold_high": "Prozentwert, um wie viel der aktuelle Preis über dem nachlaufenden 24-Stunden-Durchschnitt liegen muss, damit er als 'hoch' bewertet wird. Beispiel: 10 bedeutet mindestens 10% über Durchschnitt. Sensoren mit dieser Bewertung warnen vor teuren Zeitfenstern. Standard: 10%"
|
||||||
|
}
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"data_description": {
|
"submit": "Weiter →"
|
||||||
"price_rating_threshold_low": "Prozentwert, um wie viel der aktuelle Preis unter dem nachlaufenden 24-Stunden-Durchschnitt liegen muss, damit er als 'niedrig' bewertet wird. Beispiel: -10 bedeutet mindestens 10% unter Durchschnitt. Sensoren mit dieser Bewertung zeigen günstige Zeitfenster an. Standard: -10%",
|
|
||||||
"price_rating_threshold_high": "Prozentwert, um wie viel der aktuelle Preis über dem nachlaufenden 24-Stunden-Durchschnitt liegen muss, damit er als 'hoch' bewertet wird. Beispiel: 10 bedeutet mindestens 10% über Durchschnitt. Sensoren mit dieser Bewertung warnen vor teuren Zeitfenstern. Standard: 10%",
|
|
||||||
"price_rating_hysteresis": "Prozentband um die Schwellenwerte zur Vermeidung schneller Zustandswechsel. Wenn die Bewertung bereits NIEDRIG ist, muss der Preis über (Schwelle + Hysterese) steigen, um zu NORMAL zu wechseln. Ebenso muss bei HOCH der Preis unter (Schwelle - Hysterese) fallen, um den Zustand zu verlassen. Dies sorgt für Stabilität bei Automationen, die auf Bewertungsänderungen reagieren. Auf 0 setzen zum Deaktivieren. Standard: 2%",
|
|
||||||
"price_rating_gap_tolerance": "Maximale Anzahl aufeinanderfolgender Intervalle, die 'geglättet' werden können, wenn sie sich von den umgebenden Bewertungen unterscheiden. Kleine isolierte Bewertungsänderungen werden in den dominanten Nachbarblock integriert. Dies sorgt für Stabilität bei Automationen, indem kurze Bewertungsspitzen keine unnötigen Aktionen auslösen. Beispiel: 1 bedeutet, dass ein einzelnes 'normal'-Intervall umgeben von 'hoch'-Intervallen zu 'hoch' korrigiert wird. Auf 0 setzen zum Deaktivieren. Standard: 1"
|
|
||||||
},
|
|
||||||
"submit": "↩ Speichern & Zurück"
|
|
||||||
},
|
},
|
||||||
"best_price": {
|
"best_price": {
|
||||||
"title": "💚 Bestpreis-Zeitraum Einstellungen",
|
"title": "💚 Bestpreis-Zeitraum Einstellungen",
|
||||||
"description": "**Konfiguration für den Bestpreis-Zeitraum mit den niedrigsten Strompreisen.**{entity_warning}{override_warning}\n\n---",
|
"description": "_{step_progress}_\n\n**Konfiguration für den Bestpreis-Zeitraum mit den niedrigsten Strompreisen.**\n\n---",
|
||||||
"sections": {
|
"sections": {
|
||||||
"period_settings": {
|
"period_settings": {
|
||||||
"name": "Zeitraumdauer & Preisniveaus",
|
"name": "Zeitraumdauer & Preisniveaus",
|
||||||
"description": "Lege fest, wie lange Zeiträume sein sollen und welche Preisniveaus einbezogen werden.",
|
"description": "Legen Sie fest, wie lange Zeiträume sein sollen und welche Preisniveaus einbezogen werden.",
|
||||||
"data": {
|
"data": {
|
||||||
"best_price_min_period_length": "Minimale Zeitraumlänge",
|
"best_price_min_period_length": "Minimale Zeitraumlänge",
|
||||||
"best_price_max_level": "Preisniveau-Filter",
|
"best_price_max_level": "Preisniveau-Filter",
|
||||||
"best_price_max_level_gap_count": "Lückentoleranz"
|
"best_price_max_level_gap_count": "Lückentoleranz"
|
||||||
},
|
},
|
||||||
"data_description": {
|
"data_description": {
|
||||||
"best_price_min_period_length": "Mindestdauer für einen Zeitraum um als 'Bestpreis' zu gelten. Längere Zeiträume sind praktischer für Geräte wie Geschirrspüler oder Wärmepumpen. Bestpreis-Zeiträume benötigen mindestens 60 Minuten (vs. 30 Minuten für Spitzenlast-Warnungen), da sie aussagekräftige Zeitfenster für die Verbrauchsplanung bieten sollten.",
|
"best_price_min_period_length": "Mindestdauer für einen Zeitraum um als 'Bestpreis' zu gelten. Längere Zeiträume sind prakti scher für Geräte wie Geschirrspüler oder Wärmepumpen. Bestpreis-Zeiträume benötigen mindestens 60 Minuten (vs. 30 Minuten für Spitzenlast-Warnungen), da sie aussagekräftige Zeitfenster für die Verbrauchsplanung bieten sollten.",
|
||||||
"best_price_max_level": "Nur Bestpreis-Zeiträume anzeigen, wenn sie Intervalle mit Preisniveaus ≤ dem ausgewählten Wert enthalten. Beispiel: Auswahl '**Günstig**' bedeutet, der Zeitraum muss mindestens ein '**Sehr günstig**' oder '**Günstig**' Intervall enthalten. Dies stellt sicher, dass 'Bestpreis'-Zeiträume nicht nur relativ billig für den Tag sind, sondern auch absolut günstig. Wähle '**Alle**' um Bestpreise unabhängig vom absoluten Preisniveau anzuzeigen.",
|
"best_price_max_level": "Nur Bestpreis-Zeiträume anzeigen, wenn sie Intervalle mit Preisniveaus ≤ dem ausgewählten Wert enthalten. Beispiel: Auswahl 'Günstig' bedeutet, der Zeitraum muss mindestens ein 'SEHR_GÜNSTIG' oder 'GÜNSTIG' Intervall enthalten. Dies stellt sicher, dass 'Bestpreis'-Zeiträume nicht nur relativ billig für den Tag sind, sondern auch absolut günstig. Wählen Sie 'Alle' um Bestpreise unabhängig vom absoluten Preisniveau anzuzeigen.",
|
||||||
"best_price_max_level_gap_count": "Maximale Anzahl aufeinanderfolgender Intervalle, die um genau eine Preisstufe vom erforderlichen Niveau abweichen dürfen. Beispiel: Mit '**Günstig**'-Filter und Lückenzähler 1 wird eine Sequenz '**Günstig**, **Günstig**, **Normal**, **Günstig**' akzeptiert (**Normal** ist eine Stufe über **Günstig**). Dies verhindert, dass Zeiträume durch gelegentliche Niveauabweichungen aufgeteilt werden. **Hinweis:** Lückentoleranz erfordert Zeiträume ≥90 Minuten (6 Intervalle), um Ausreißer effektiv zu erkennen. Standard: 0 (strikte Filterung, keine Toleranz)."
|
"best_price_max_level_gap_count": "Maximale Anzahl aufeinanderfolgender Intervalle, die um genau eine Preisstufe vom erforderlichen Niveau abweichen dürfen. Beispiel: Mit 'Günstig'-Filter und Lückenzähler 1 wird eine Sequenz 'GÜNSTIG, GÜNSTIG, NORMAL, GÜNSTIG' akzeptiert (NORMAL ist eine Stufe über GÜNSTIG). Dies verhindert, dass Zeiträume durch gelegentliche Niveauabweichungen aufgeteilt werden. **Hinweis:** Lückentoleranz erfordert Zeiträume ≥90 Minuten (6 Intervalle), um Ausreißer effektiv zu erkennen. Standard: 0 (strikte Filterung, keine Toleranz)."
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"flexibility_settings": {
|
"flexibility_settings": {
|
||||||
"name": "Flexibilität & Schwellenwerte",
|
"name": "Flexibilität & Schwellenwerte",
|
||||||
"description": "Kontrolliere, wie sehr Preise abweichen dürfen und dennoch als 'Bestpreis' gelten.",
|
"description": "Kontrollieren Sie, wie sehr Preise abweichen dürfen und dennoch als 'Bestpreis' gelten.",
|
||||||
"data": {
|
"data": {
|
||||||
"best_price_flex": "Flexibilität",
|
"best_price_flex": "Flexibilität",
|
||||||
"best_price_min_distance_from_avg": "Mindestabstand"
|
"best_price_min_distance_from_avg": "Mindestabstand"
|
||||||
|
|
@ -235,7 +193,7 @@
|
||||||
},
|
},
|
||||||
"relaxation_and_target_periods": {
|
"relaxation_and_target_periods": {
|
||||||
"name": "Lockerung & Zielanzahl Zeiträume",
|
"name": "Lockerung & Zielanzahl Zeiträume",
|
||||||
"description": "Konfiguriere automatische Filterlockerung und Zielanzahl von Zeiträumen. Aktiviere 'Mindestanzahl anstreben' um die Lockerung zu aktivieren.",
|
"description": "Konfigurieren Sie automatische Filterlockerung und Zielanzahl von Zeiträumen. Aktivieren Sie 'Mindestanzahl anstreben' um die Lockerung zu aktivieren.",
|
||||||
"data": {
|
"data": {
|
||||||
"enable_min_periods_best": "Mindestanzahl anstreben",
|
"enable_min_periods_best": "Mindestanzahl anstreben",
|
||||||
"min_periods_best": "Mindestanzahl Zeiträume",
|
"min_periods_best": "Mindestanzahl Zeiträume",
|
||||||
|
|
@ -248,11 +206,11 @@
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"submit": "↩ Speichern & Zurück"
|
"submit": "Weiter →"
|
||||||
},
|
},
|
||||||
"peak_price": {
|
"peak_price": {
|
||||||
"title": "🔴 Spitzenpreis-Zeitraum Einstellungen",
|
"title": "🔴 Spitzenpreis-Zeitraum Einstellungen",
|
||||||
"description": "**Konfiguration für den Spitzenpreis-Zeitraum mit den höchsten Strompreisen.**{entity_warning}{override_warning}\n\n---",
|
"description": "_{step_progress}_\n\n**Konfiguration für den Spitzenpreis-Zeitraum mit den höchsten Strompreisen.**\n\n---",
|
||||||
"sections": {
|
"sections": {
|
||||||
"period_settings": {
|
"period_settings": {
|
||||||
"name": "Zeitraum-Einstellungen",
|
"name": "Zeitraum-Einstellungen",
|
||||||
|
|
@ -264,8 +222,8 @@
|
||||||
},
|
},
|
||||||
"data_description": {
|
"data_description": {
|
||||||
"peak_price_min_period_length": "Minimale Dauer, damit ein Zeitraum als 'Spitzenpreis' gilt. Spitzenpreis-Warnungen sind für kürzere Zeiträume zulässig (mindestens 30 Minuten im Vergleich zu 60 Minuten für Bestpreis), da kurze teure Spitzen eine Warnung wert sind, auch wenn sie für die Verbrauchsplanung zu kurz sind.",
|
"peak_price_min_period_length": "Minimale Dauer, damit ein Zeitraum als 'Spitzenpreis' gilt. Spitzenpreis-Warnungen sind für kürzere Zeiträume zulässig (mindestens 30 Minuten im Vergleich zu 60 Minuten für Bestpreis), da kurze teure Spitzen eine Warnung wert sind, auch wenn sie für die Verbrauchsplanung zu kurz sind.",
|
||||||
"peak_price_min_level": "Zeigt Spitzenpreis-Zeiträume nur an, wenn sie Intervalle mit Preisniveaus ≥ dem gewählten Wert enthalten. Beispiel: Wahl von '**Teuer**' bedeutet, dass der Zeitraum mindestens ein '**Teuer**' oder '**Sehr teuer**' Intervall haben muss. Dies stellt sicher, dass Spitzenpreis-Zeiträume nicht nur relativ teuer für den Tag sind, sondern tatsächlich teuer in absoluten Zahlen. Wähle '**Beliebig**' um Spitzenpreise unabhängig vom absoluten Preisniveau anzuzeigen.",
|
"peak_price_min_level": "Zeigt Spitzenpreis-Zeiträume nur an, wenn sie Intervalle mit Preisniveaus ≥ dem gewählten Wert enthalten. Beispiel: Wahl von 'Teuer' bedeutet, dass der Zeitraum mindestens ein 'TEUER' oder 'SEHR TEUER' Intervall haben muss. Dies stellt sicher, dass Spitzenpreis-Zeiträume nicht nur relativ teuer für den Tag sind, sondern tatsächlich teuer in absoluten Zahlen. Wähle 'Beliebig' um Spitzenpreise unabhängig vom absoluten Preisniveau anzuzeigen.",
|
||||||
"peak_price_max_level_gap_count": "Maximale Anzahl aufeinanderfolgender Intervalle, die exakt um eine Niveaustufe vom geforderten Level abweichen dürfen. Beispiel: Bei Filter '**Teuer**' und Lückentoleranz 2 wird die Sequenz '**Teuer**, **Normal**, **Normal**, **Teuer**' akzeptiert (**Normal** ist eine Stufe unter **Teuer**). Dies verhindert, dass Zeiträume durch gelegentliche Niveau-Abweichungen aufgespalten werden. **Hinweis:** Lückentoleranz erfordert Zeiträume ≥90 Minuten (6 Intervalle), um Ausreißer effektiv zu erkennen. Standard: 0 (strenge Filterung, keine Toleranz)."
|
"peak_price_max_level_gap_count": "Maximale Anzahl aufeinanderfolgender Intervalle, die exakt um eine Niveaustufe vom geforderten Level abweichen dürfen. Beispiel: Bei Filter 'Teuer' und Lückentoleranz 2 wird die Sequenz 'TEUER, NORMAL, NORMAL, TEUER' akzeptiert (NORMAL ist eine Stufe unter TEUER). Dies verhindert, dass Zeiträume durch gelegentliche Niveau-Abweichungen aufgespalten werden. **Hinweis:** Lückentoleranz erfordert Zeiträume ≥90 Minuten (6 Intervalle), um Ausreißer effektiv zu erkennen. Standard: 0 (strenge Filterung, keine Toleranz)."
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"flexibility_settings": {
|
"flexibility_settings": {
|
||||||
|
|
@ -295,63 +253,52 @@
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"submit": "↩ Speichern & Zurück"
|
"submit": "Weiter →"
|
||||||
},
|
},
|
||||||
"price_trend": {
|
"price_trend": {
|
||||||
"title": "📈 Preistrend-Schwellenwerte",
|
"title": "📈 Preistrend-Schwellenwerte",
|
||||||
"description": "**Konfiguriere Schwellenwerte für Preistrend-Sensoren.** Diese Sensoren vergleichen den aktuellen Preis mit dem Durchschnitt der nächsten N Stunden, um festzustellen, ob die Preise steigen, fallen oder stabil sind.\n\n**5-Stufen-Skala:** Nutzt stark_fallend (-2), fallend (-1), stabil (0), steigend (+1), stark_steigend (+2) für Automations-Vergleiche über das trend_value Attribut.{entity_warning}",
|
"description": "_{step_progress}_\n\n**Konfiguriere Schwellenwerte für Preistrend-Sensoren. Diese Sensoren vergleichen den aktuellen Preis mit dem Durchschnitt der nächsten N Stunden, um festzustellen, ob die Preise steigen, fallen oder stabil sind.**\n\n---",
|
||||||
"data": {
|
"sections": {
|
||||||
"price_trend_threshold_rising": "Steigend-Schwelle",
|
"price_trend_thresholds": {
|
||||||
"price_trend_threshold_strongly_rising": "Stark steigend-Schwelle",
|
"name": "Preistrend-Schwellenwerte",
|
||||||
"price_trend_threshold_falling": "Fallend-Schwelle",
|
"description": "Definiere die Einstufungen für den Preistrend.",
|
||||||
"price_trend_threshold_strongly_falling": "Stark fallend-Schwelle"
|
"data": {
|
||||||
|
"price_trend_threshold_rising": "Steigend-Schwelle",
|
||||||
|
"price_trend_threshold_falling": "Fallend-Schwelle"
|
||||||
|
},
|
||||||
|
"data_description": {
|
||||||
|
"price_trend_threshold_rising": "Prozentwert, um wie viel der Durchschnitt der nächsten N Stunden über dem aktuellen Preis liegen muss, damit der Trend als 'steigend' gilt. Beispiel: 5 bedeutet Durchschnitt ist mindestens 5% höher → Preise werden steigen. Typische Werte: 5-15%. Standard: 5%",
|
||||||
|
"price_trend_threshold_falling": "Prozentwert (negativ), um wie viel der Durchschnitt der nächsten N Stunden unter dem aktuellen Preis liegen muss, damit der Trend als 'fallend' gilt. Beispiel: -5 bedeutet Durchschnitt ist mindestens 5% niedriger → Preise werden fallen. Typische Werte: -5 bis -15%. Standard: -5%"
|
||||||
|
}
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"data_description": {
|
"submit": "Weiter →"
|
||||||
"price_trend_threshold_rising": "Prozentwert, um wie viel der Durchschnitt der nächsten N Stunden über dem aktuellen Preis liegen muss, damit der Trend als 'steigend' gilt. Beispiel: 3 bedeutet Durchschnitt ist mindestens 3% höher → Preise werden steigen. Typische Werte: 3-10%. Standard: 3%",
|
|
||||||
"price_trend_threshold_strongly_rising": "Prozentwert für 'stark steigend'-Trend. Muss höher sein als die steigend-Schwelle. Beispiel: 6 bedeutet Durchschnitt ist mindestens 6% höher → Preise werden deutlich steigen. Typische Werte: 6-15%. Standard: 6%",
|
|
||||||
"price_trend_threshold_falling": "Prozentwert (negativ), um wie viel der Durchschnitt der nächsten N Stunden unter dem aktuellen Preis liegen muss, damit der Trend als 'fallend' gilt. Beispiel: -3 bedeutet Durchschnitt ist mindestens 3% niedriger → Preise werden fallen. Typische Werte: -3 bis -10%. Standard: -3%",
|
|
||||||
"price_trend_threshold_strongly_falling": "Prozentwert (negativ) für 'stark fallend'-Trend. Muss niedriger (negativer) sein als die fallend-Schwelle. Beispiel: -6 bedeutet Durchschnitt ist mindestens 6% niedriger → Preise werden deutlich fallen. Typische Werte: -6 bis -15%. Standard: -6%"
|
|
||||||
},
|
|
||||||
"submit": "↩ Speichern & Zurück"
|
|
||||||
},
|
},
|
||||||
"volatility": {
|
"volatility": {
|
||||||
"title": "💨 Volatilität Schwellenwerte",
|
"title": "💨 Volatilität Schwellenwerte",
|
||||||
"description": "**Konfiguriere Schwellenwerte für die Volatilitätsklassifizierung.** Volatilität misst relative Preisschwankungen anhand des Variationskoeffizienten (VK = Standardabweichung / Durchschnitt × 100%). Diese Schwellenwerte sind Prozentwerte, die für alle Preisniveaus funktionieren.\n\nVerwendet von:\n• Volatilitätssensoren (Klassifizierung)\n• Trend-Sensoren (adaptive Schwellenanpassung: <moderat = empfindlicher, ≥hoch = weniger empfindlich){entity_warning}",
|
"description": "_{step_progress}_\n\n**Konfiguriere Schwellenwerte für die Volatilitätsklassifizierung.** Volatilität misst relative Preisschwankungen anhand des Variationskoeffizienten (VK = Standardabweichung / Durchschnitt × 100%). Diese Schwellenwerte sind Prozentwerte, die für alle Preisniveaus funktionieren.\n\nVerwendet von:\n• Volatilitätssensoren (Klassifizierung)\n• Trend-Sensoren (adaptive Schwellenanpassung: <moderat = empfindlicher, ≥hoch = weniger empfindlich)\n\n---",
|
||||||
"data": {
|
"sections": {
|
||||||
"volatility_threshold_moderate": "Moderat-Schwelle",
|
"volatility_thresholds": {
|
||||||
"volatility_threshold_high": "Hoch-Schwelle",
|
"name": "Volatilitätsschwellen",
|
||||||
"volatility_threshold_very_high": "Sehr hoch-Schwelle"
|
"description": "Definiere Volatilitäts-Klassifizierungsstufen.",
|
||||||
|
"data": {
|
||||||
|
"volatility_threshold_moderate": "Moderat-Schwelle",
|
||||||
|
"volatility_threshold_high": "Hoch-Schwelle",
|
||||||
|
"volatility_threshold_very_high": "Sehr hoch-Schwelle"
|
||||||
|
},
|
||||||
|
"data_description": {
|
||||||
|
"volatility_threshold_moderate": "Variationskoeffizient (VK) ab dem Preise als 'moderat volatil' gelten. VK = (Standardabweichung / Durchschnitt) × 100%. Beispiel: 15 bedeutet Preisschwankungen von ±15% um den Durchschnitt. Sensoren zeigen diese Klassifizierung an, Trend-Sensoren werden empfindlicher. Standard: 15%",
|
||||||
|
"volatility_threshold_high": "Variationskoeffizient (VK) ab dem Preise als 'hoch volatil' gelten. Beispiel: 30 bedeutet Preisschwankungen von ±30% um den Durchschnitt. Größere Preissprünge erwartet, Trend-Sensoren werden weniger empfindlich. Standard: 30%",
|
||||||
|
"volatility_threshold_very_high": "Variationskoeffizient (VK) ab dem Preise als 'sehr hoch volatil' gelten. Beispiel: 50 bedeutet extreme Preisschwankungen von ±50% um den Durchschnitt. An solchen Tagen sind starke Preisspitzen wahrscheinlich. Standard: 50%"
|
||||||
|
}
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"data_description": {
|
"submit": "Weiter →"
|
||||||
"volatility_threshold_moderate": "Variationskoeffizient (VK) ab dem Preise als 'moderat volatil' gelten. VK = (Standardabweichung / Durchschnitt) × 100%. Beispiel: 15 bedeutet Preisschwankungen von ±15% um den Durchschnitt. Sensoren zeigen diese Klassifizierung an, Trend-Sensoren werden empfindlicher. Standard: 15%",
|
|
||||||
"volatility_threshold_high": "Variationskoeffizient (VK) ab dem Preise als 'hoch volatil' gelten. Beispiel: 30 bedeutet Preisschwankungen von ±30% um den Durchschnitt. Größere Preissprünge erwartet, Trend-Sensoren werden weniger empfindlich. Standard: 30%",
|
|
||||||
"volatility_threshold_very_high": "Variationskoeffizient (VK) ab dem Preise als 'sehr hoch volatil' gelten. Beispiel: 50 bedeutet extreme Preisschwankungen von ±50% um den Durchschnitt. An solchen Tagen sind starke Preisspitzen wahrscheinlich. Standard: 50%"
|
|
||||||
},
|
|
||||||
"submit": "↩ Speichern & Zurück"
|
|
||||||
},
|
},
|
||||||
"chart_data_export": {
|
"chart_data_export": {
|
||||||
"title": "📊 Chart Data Export Sensor",
|
"title": "📊 Chart Data Export Sensor",
|
||||||
"description": "Der Chart Data Export Sensor stellt Preisdaten als Sensor-Attribute zur Verfügung.\n\n⚠️ **Hinweis:** Dieser Sensor ist ein Legacy-Feature für Kompatibilität mit älteren Tools.\n\n**Für neue Setups empfohlen:** Nutze den `tibber_prices.get_chartdata` **Service direkt** - er ist flexibler, effizienter und der moderne Home Assistant-Ansatz.\n\n**Wann dieser Sensor sinnvoll ist:**\n\n✅ Dein Dashboard-Tool kann **nur** Attribute lesen (keine Service-Aufrufe)\n✅ Du brauchst statische Daten, die automatisch aktualisiert werden\n❌ **Nicht für Automationen:** Nutze dort direkt `tibber_prices.get_chartdata` - flexibler und effizienter!\n\n---\n\n{sensor_status_info}",
|
"description": "_{step_progress}_\n\nDer Chart Data Export Sensor stellt Preisdaten als Sensor-Attribute zur Verfügung.\n\n⚠️ **Hinweis:** Dieser Sensor ist ein Legacy-Feature für Kompatibilität mit älteren Tools.\n\n**Für neue Setups empfohlen:** Nutze den `tibber_prices.get_chartdata` **Service direkt** - er ist flexibler, effizienter und der moderne Home Assistant-Ansatz.\n\n**Wann dieser Sensor sinnvoll ist:**\n\n✅ Dein Dashboard-Tool kann **nur** Attribute lesen (keine Service-Aufrufe)\n✅ Du brauchst statische Daten, die automatisch aktualisiert werden\n❌ **Nicht für Automationen:** Nutze dort direkt `tibber_prices.get_chartdata` - flexibler und effizienter!\n\n---\n\n**Sensor aktivieren:**\n\n1. Öffne **Einstellungen → Geräte & Dienste → Tibber Prices**\n2. Wähle dein Home → Finde **'Chart Data Export'** (Diagnose-Bereich)\n3. **Aktiviere den Sensor** (standardmäßig deaktiviert)\n\n**Konfiguration (optional):**\n\nStandardeinstellung funktioniert sofort (heute+morgen, 15-Minuten-Intervalle, reine Preise).\n\nFür Anpassungen füge in **`configuration.yaml`** ein:\n\n```yaml\ntibber_prices:\n chart_export:\n day:\n - today\n - tomorrow\n include_level: true\n include_rating_level: true\n```\n\n**Alle Parameter:** Siehe `tibber_prices.get_chartdata` Service-Dokumentation",
|
||||||
"submit": "↩ Ok & Zurück"
|
"submit": "Abschließen ✓"
|
||||||
},
|
|
||||||
"reset_to_defaults": {
|
|
||||||
"title": "🔄 Auf Werkseinstellungen zurücksetzen",
|
|
||||||
"description": "⚠️ **Warnung:** Dies setzt **ALLE** Einstellungen auf Werkseinstellungen zurück.\n\n**Was wird zurückgesetzt:**\n• Alle Preisbewertungs-Schwellwerte\n• Alle Volatilitäts-Schwellwerte\n• Alle Preistrend-Schwellwerte\n• Alle Einstellungen für Best-Price-Perioden\n• Alle Einstellungen für Peak-Price-Perioden\n• Anzeigeeinstellungen\n• Allgemeine Einstellungen\n\n**Was wird NICHT zurückgesetzt:**\n• Dein Tibber API-Token\n• Ausgewähltes Zuhause\n• Währung\n\n**💡 Tipp:** Nützlich, wenn du nach dem Experimentieren mit Einstellungen neu beginnen möchtest.",
|
|
||||||
"data": {
|
|
||||||
"confirm_reset": "Ja, alles auf Werkseinstellungen zurücksetzen"
|
|
||||||
},
|
|
||||||
"submit": "Jetzt zurücksetzen"
|
|
||||||
},
|
|
||||||
"price_level": {
|
|
||||||
"title": "🏷️ Preisniveau-Einstellungen (von Tibber API)",
|
|
||||||
"description": "**Konfiguriere die Stabilisierung für Tibbers Preisniveau-Klassifizierung (sehr günstig/günstig/normal/teuer/sehr teuer).**\n\nTibbers API liefert ein Preisniveau-Feld für jedes Intervall. Diese Einstellung glättet kurze Schwankungen, um Instabilität in Automatisierungen zu verhindern.{entity_warning}",
|
|
||||||
"data": {
|
|
||||||
"price_level_gap_tolerance": "Gap-Toleranz"
|
|
||||||
},
|
|
||||||
"data_description": {
|
|
||||||
"price_level_gap_tolerance": "Maximale Anzahl aufeinanderfolgender Intervalle, die 'geglättet' werden können, wenn sie von umgebenden Preisniveaus abweichen. Kleine isolierte Niveauänderungen werden mit dem dominanten Nachbarblock zusammengeführt. Beispiel: 1 bedeutet, dass ein einzelnes 'normal'-Intervall, umgeben von 'günstig'-Intervallen, zu 'günstig' korrigiert wird. Auf 0 setzen zum Deaktivieren. Standard: 1"
|
|
||||||
},
|
|
||||||
"submit": "↩ Speichern & Zurück"
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"error": {
|
"error": {
|
||||||
|
|
@ -361,10 +308,10 @@
|
||||||
"cannot_connect": "Verbindung fehlgeschlagen",
|
"cannot_connect": "Verbindung fehlgeschlagen",
|
||||||
"invalid_access_token": "Ungültiges Zugriffstoken",
|
"invalid_access_token": "Ungültiges Zugriffstoken",
|
||||||
"different_home": "Der Zugriffstoken ist nicht gültig für die Home ID, für die diese Integration konfiguriert ist.",
|
"different_home": "Der Zugriffstoken ist nicht gültig für die Home ID, für die diese Integration konfiguriert ist.",
|
||||||
"invalid_flex": "Flexibilitätsprozentsatz muss zwischen -50% und +50% liegen",
|
"invalid_flex": "TRANSLATE: Flexibility percentage must be between -50% and +50%",
|
||||||
"invalid_best_price_distance": "Distanzprozentsatz muss zwischen -50% und 0% liegen (negativ = unter Durchschnitt)",
|
"invalid_best_price_distance": "TRANSLATE: Distance percentage must be between -50% and 0% (negative = below average)",
|
||||||
"invalid_peak_price_distance": "Distanzprozentsatz muss zwischen 0% und 50% liegen (positiv = über Durchschnitt)",
|
"invalid_peak_price_distance": "TRANSLATE: Distance percentage must be between 0% and 50% (positive = above average)",
|
||||||
"invalid_min_periods": "Mindestanzahl der Zeiträume muss zwischen 1 und 10 liegen",
|
"invalid_min_periods": "TRANSLATE: Minimum periods count must be between 1 and 10",
|
||||||
"invalid_period_length": "Die Periodenlänge muss mindestens 15 Minuten betragen (Vielfache von 15).",
|
"invalid_period_length": "Die Periodenlänge muss mindestens 15 Minuten betragen (Vielfache von 15).",
|
||||||
"invalid_gap_count": "Lückentoleranz muss zwischen 0 und 8 liegen",
|
"invalid_gap_count": "Lückentoleranz muss zwischen 0 und 8 liegen",
|
||||||
"invalid_relaxation_attempts": "Lockerungsversuche müssen zwischen 1 und 12 liegen",
|
"invalid_relaxation_attempts": "Lockerungsversuche müssen zwischen 1 und 12 liegen",
|
||||||
|
|
@ -376,25 +323,22 @@
|
||||||
"invalid_volatility_threshold_very_high": "Sehr hohe Volatilitätsschwelle muss zwischen 35% und 80% liegen",
|
"invalid_volatility_threshold_very_high": "Sehr hohe Volatilitätsschwelle muss zwischen 35% und 80% liegen",
|
||||||
"invalid_volatility_thresholds": "Schwellenwerte müssen aufsteigend sein: moderat < hoch < sehr hoch",
|
"invalid_volatility_thresholds": "Schwellenwerte müssen aufsteigend sein: moderat < hoch < sehr hoch",
|
||||||
"invalid_price_trend_rising": "Steigender Trendschwellenwert muss zwischen 1% und 50% liegen",
|
"invalid_price_trend_rising": "Steigender Trendschwellenwert muss zwischen 1% und 50% liegen",
|
||||||
"invalid_price_trend_falling": "Fallender Trendschwellenwert muss zwischen -50% und -1% liegen",
|
"invalid_price_trend_falling": "Fallender Trendschwellenwert muss zwischen -50% und -1% liegen"
|
||||||
"invalid_price_trend_strongly_rising": "Stark steigender Trendschwellenwert muss zwischen 2% und 100% liegen",
|
|
||||||
"invalid_price_trend_strongly_falling": "Stark fallender Trendschwellenwert muss zwischen -100% und -2% liegen",
|
|
||||||
"invalid_trend_strongly_rising_less_than_rising": "Stark steigend-Schwelle muss größer als steigend-Schwelle sein",
|
|
||||||
"invalid_trend_strongly_falling_greater_than_falling": "Stark fallend-Schwelle muss kleiner (negativer) als fallend-Schwelle sein"
|
|
||||||
},
|
},
|
||||||
"abort": {
|
"abort": {
|
||||||
"entry_not_found": "Tibber Konfigurationseintrag nicht gefunden.",
|
"entry_not_found": "Tibber Konfigurationseintrag nicht gefunden."
|
||||||
"reset_cancelled": "Zurücksetzen abgebrochen. Es wurden keine Änderungen an deiner Konfiguration vorgenommen.",
|
},
|
||||||
"reset_successful": "✅ Alle Einstellungen wurden auf Werkseinstellungen zurückgesetzt. Deine Konfiguration ist jetzt wie bei einer frischen Installation.",
|
"best_price_flex": "Bestpreis Flexibilität (%)",
|
||||||
"finished": "Konfiguration abgeschlossen."
|
"peak_price_flex": "Spitzenpreis Flexibilität (%)",
|
||||||
}
|
"price_rating_threshold_low": "Niedriger Preis Schwellenwert (% zum gleitenden Durchschnitt)",
|
||||||
|
"price_rating_threshold_high": "Hoher Preis Schwellenwert (% zum gleitenden Durchschnitt)"
|
||||||
},
|
},
|
||||||
"entity": {
|
"entity": {
|
||||||
"sensor": {
|
"sensor": {
|
||||||
"current_interval_price": {
|
"current_interval_price": {
|
||||||
"name": "Aktueller Strompreis"
|
"name": "Aktueller Strompreis"
|
||||||
},
|
},
|
||||||
"current_interval_price_base": {
|
"current_interval_price_major": {
|
||||||
"name": "Aktueller Strompreis (Energie-Dashboard)"
|
"name": "Aktueller Strompreis (Energie-Dashboard)"
|
||||||
},
|
},
|
||||||
"next_interval_price": {
|
"next_interval_price": {
|
||||||
|
|
@ -616,91 +560,73 @@
|
||||||
"price_trend_1h": {
|
"price_trend_1h": {
|
||||||
"name": "Preistrend (1h)",
|
"name": "Preistrend (1h)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Stark steigend",
|
|
||||||
"rising": "Steigend",
|
"rising": "Steigend",
|
||||||
"stable": "Stabil",
|
|
||||||
"falling": "Fallend",
|
"falling": "Fallend",
|
||||||
"strongly_falling": "Stark fallend"
|
"stable": "Stabil"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_2h": {
|
"price_trend_2h": {
|
||||||
"name": "Preistrend (2h)",
|
"name": "Preistrend (2h)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Stark steigend",
|
|
||||||
"rising": "Steigend",
|
"rising": "Steigend",
|
||||||
"stable": "Stabil",
|
|
||||||
"falling": "Fallend",
|
"falling": "Fallend",
|
||||||
"strongly_falling": "Stark fallend"
|
"stable": "Stabil"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_3h": {
|
"price_trend_3h": {
|
||||||
"name": "Preistrend (3h)",
|
"name": "Preistrend (3h)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Stark steigend",
|
|
||||||
"rising": "Steigend",
|
"rising": "Steigend",
|
||||||
"stable": "Stabil",
|
|
||||||
"falling": "Fallend",
|
"falling": "Fallend",
|
||||||
"strongly_falling": "Stark fallend"
|
"stable": "Stabil"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_4h": {
|
"price_trend_4h": {
|
||||||
"name": "Preistrend (4h)",
|
"name": "Preistrend (4h)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Stark steigend",
|
|
||||||
"rising": "Steigend",
|
"rising": "Steigend",
|
||||||
"stable": "Stabil",
|
|
||||||
"falling": "Fallend",
|
"falling": "Fallend",
|
||||||
"strongly_falling": "Stark fallend"
|
"stable": "Stabil"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_5h": {
|
"price_trend_5h": {
|
||||||
"name": "Preistrend (5h)",
|
"name": "Preistrend (5h)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Stark steigend",
|
|
||||||
"rising": "Steigend",
|
"rising": "Steigend",
|
||||||
"stable": "Stabil",
|
|
||||||
"falling": "Fallend",
|
"falling": "Fallend",
|
||||||
"strongly_falling": "Stark fallend"
|
"stable": "Stabil"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_6h": {
|
"price_trend_6h": {
|
||||||
"name": "Preistrend (6h)",
|
"name": "Preistrend (6h)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Stark steigend",
|
|
||||||
"rising": "Steigend",
|
"rising": "Steigend",
|
||||||
"stable": "Stabil",
|
|
||||||
"falling": "Fallend",
|
"falling": "Fallend",
|
||||||
"strongly_falling": "Stark fallend"
|
"stable": "Stabil"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_8h": {
|
"price_trend_8h": {
|
||||||
"name": "Preistrend (8h)",
|
"name": "Preistrend (8h)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Stark steigend",
|
|
||||||
"rising": "Steigend",
|
"rising": "Steigend",
|
||||||
"stable": "Stabil",
|
|
||||||
"falling": "Fallend",
|
"falling": "Fallend",
|
||||||
"strongly_falling": "Stark fallend"
|
"stable": "Stabil"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_12h": {
|
"price_trend_12h": {
|
||||||
"name": "Preistrend (12h)",
|
"name": "Preistrend (12h)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Stark steigend",
|
|
||||||
"rising": "Steigend",
|
"rising": "Steigend",
|
||||||
"stable": "Stabil",
|
|
||||||
"falling": "Fallend",
|
"falling": "Fallend",
|
||||||
"strongly_falling": "Stark fallend"
|
"stable": "Stabil"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"current_price_trend": {
|
"current_price_trend": {
|
||||||
"name": "Aktueller Preistrend",
|
"name": "Aktueller Preistrend",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Stark steigend",
|
|
||||||
"rising": "Steigend",
|
"rising": "Steigend",
|
||||||
"stable": "Stabil",
|
|
||||||
"falling": "Fallend",
|
"falling": "Fallend",
|
||||||
"strongly_falling": "Stark fallend"
|
"stable": "Stabil"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"next_price_trend_change": {
|
"next_price_trend_change": {
|
||||||
|
|
@ -873,14 +799,6 @@
|
||||||
"ready": "Bereit",
|
"ready": "Bereit",
|
||||||
"error": "Fehler"
|
"error": "Fehler"
|
||||||
}
|
}
|
||||||
},
|
|
||||||
"chart_metadata": {
|
|
||||||
"name": "Diagramm-Metadaten",
|
|
||||||
"state": {
|
|
||||||
"pending": "Ausstehend",
|
|
||||||
"ready": "Bereit",
|
|
||||||
"error": "Fehler"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"binary_sensor": {
|
"binary_sensor": {
|
||||||
|
|
@ -902,52 +820,6 @@
|
||||||
"realtime_consumption_enabled": {
|
"realtime_consumption_enabled": {
|
||||||
"name": "Echtzeitverbrauch aktiviert"
|
"name": "Echtzeitverbrauch aktiviert"
|
||||||
}
|
}
|
||||||
},
|
|
||||||
"number": {
|
|
||||||
"best_price_flex_override": {
|
|
||||||
"name": "Bestpreis: Flexibilität"
|
|
||||||
},
|
|
||||||
"best_price_min_distance_override": {
|
|
||||||
"name": "Bestpreis: Mindestabstand"
|
|
||||||
},
|
|
||||||
"best_price_min_period_length_override": {
|
|
||||||
"name": "Bestpreis: Mindestperiodenlänge"
|
|
||||||
},
|
|
||||||
"best_price_min_periods_override": {
|
|
||||||
"name": "Bestpreis: Mindestperioden"
|
|
||||||
},
|
|
||||||
"best_price_relaxation_attempts_override": {
|
|
||||||
"name": "Bestpreis: Lockerungsversuche"
|
|
||||||
},
|
|
||||||
"best_price_gap_count_override": {
|
|
||||||
"name": "Bestpreis: Lückentoleranz"
|
|
||||||
},
|
|
||||||
"peak_price_flex_override": {
|
|
||||||
"name": "Spitzenpreis: Flexibilität"
|
|
||||||
},
|
|
||||||
"peak_price_min_distance_override": {
|
|
||||||
"name": "Spitzenpreis: Mindestabstand"
|
|
||||||
},
|
|
||||||
"peak_price_min_period_length_override": {
|
|
||||||
"name": "Spitzenpreis: Mindestperiodenlänge"
|
|
||||||
},
|
|
||||||
"peak_price_min_periods_override": {
|
|
||||||
"name": "Spitzenpreis: Mindestperioden"
|
|
||||||
},
|
|
||||||
"peak_price_relaxation_attempts_override": {
|
|
||||||
"name": "Spitzenpreis: Lockerungsversuche"
|
|
||||||
},
|
|
||||||
"peak_price_gap_count_override": {
|
|
||||||
"name": "Spitzenpreis: Lückentoleranz"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"switch": {
|
|
||||||
"best_price_enable_relaxation_override": {
|
|
||||||
"name": "Bestpreis: Mindestanzahl erreichen"
|
|
||||||
},
|
|
||||||
"peak_price_enable_relaxation_override": {
|
|
||||||
"name": "Spitzenpreis: Mindestanzahl erreichen"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"issues": {
|
"issues": {
|
||||||
|
|
@ -958,18 +830,6 @@
|
||||||
"homes_removed": {
|
"homes_removed": {
|
||||||
"title": "Tibber-Häuser entfernt",
|
"title": "Tibber-Häuser entfernt",
|
||||||
"description": "Wir haben erkannt, dass {count} Zuhause aus deinem Tibber-Konto entfernt wurde(n): {homes}. Bitte überprüfe deine Tibber-Integrationskonfiguration."
|
"description": "Wir haben erkannt, dass {count} Zuhause aus deinem Tibber-Konto entfernt wurde(n): {homes}. Bitte überprüfe deine Tibber-Integrationskonfiguration."
|
||||||
},
|
|
||||||
"tomorrow_data_missing": {
|
|
||||||
"title": "Preisdaten für morgen fehlen für {home_name}",
|
|
||||||
"description": "Die Strompreisdaten für morgen sind nach {warning_hour}:00 Uhr immer noch nicht verfügbar. Das ist ungewöhnlich, da Tibber normalerweise die Preise für morgen am Nachmittag veröffentlicht (ca. 13:00-14:00 Uhr MEZ).\n\nMögliche Ursachen:\n- Tibber hat die Preise für morgen noch nicht veröffentlicht\n- Temporäre API-Probleme\n- Dein Stromanbieter hat die Preise noch nicht an Tibber übermittelt\n\nDieses Problem löst sich automatisch, sobald die Daten für morgen verfügbar sind. Falls dies nach 20:00 Uhr weiterhin besteht, prüfe bitte die Tibber-App oder kontaktiere den Tibber-Support."
|
|
||||||
},
|
|
||||||
"rate_limit_exceeded": {
|
|
||||||
"title": "API-Ratenlimit erreicht für {home_name}",
|
|
||||||
"description": "Die Tibber-API hat diese Integration nach {error_count} aufeinanderfolgenden Fehlern ratenlimitiert. Das bedeutet, dass Anfragen zu häufig gestellt werden.\n\nDie Integration wird automatisch mit zunehmenden Verzögerungen erneut versuchen. Dieses Problem löst sich, sobald das Ratenlimit abläuft.\n\nFalls dies mehrere Stunden anhält, überprüfe:\n- Ob mehrere Home Assistant Instanzen denselben API-Token verwenden\n- Ob andere Anwendungen deinen Tibber-API-Token stark nutzen\n- Die Update-Frequenz reduzieren, falls du sie angepasst hast"
|
|
||||||
},
|
|
||||||
"home_not_found": {
|
|
||||||
"title": "Zuhause {home_name} nicht im Tibber-Konto gefunden",
|
|
||||||
"description": "Das in dieser Integration konfigurierte Zuhause (Eintrag-ID: {entry_id}) ist nicht mehr in deinem Tibber-Konto verfügbar. Dies passiert normalerweise, wenn:\n- Das Zuhause aus deinem Tibber-Konto gelöscht wurde\n- Das Zuhause zu einem anderen Tibber-Konto verschoben wurde\n- Der Zugriff auf dieses Zuhause widerrufen wurde\n\nBitte entferne diesen Integrationseintrag und füge ihn erneut hinzu, falls das Zuhause weiterhin überwacht werden soll. Um diesen Eintrag zu entfernen, gehe zu Einstellungen → Geräte & Dienste → Tibber Prices und lösche die Konfiguration {home_name}."
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"services": {
|
"services": {
|
||||||
|
|
@ -1001,23 +861,11 @@
|
||||||
},
|
},
|
||||||
"day": {
|
"day": {
|
||||||
"name": "Tag",
|
"name": "Tag",
|
||||||
"description": "Welcher Tag visualisiert werden soll (Standard: Rollierendes Fenster). Feste Tag-Optionen (Gestern/Heute/Morgen) zeigen 24h-Fenster ohne zusätzliche Abhängigkeiten. Dynamische Optionen benötigen config-template-card: Rollierendes Fenster zeigt ein festes 48h-Fenster, das automatisch zwischen gestern+heute und heute+morgen wechselt basierend auf Datenverfügbarkeit. Rollierendes Fenster (Auto-Zoom) verhält sich gleich, zoomt aber zusätzlich automatisch rein (2h Rückblick + verbleibende Zeit bis Mitternacht, graph_span verringert sich alle 15 Minuten)."
|
"description": "Welcher Tag visualisiert werden soll (gestern, heute oder morgen). Falls nicht angegeben, wird ein rollierendes 2-Tage-Fenster zurückgegeben: heute+morgen (wenn Daten für morgen verfügbar sind) oder gestern+heute (wenn Daten für morgen noch nicht verfügbar sind)."
|
||||||
},
|
},
|
||||||
"level_type": {
|
"level_type": {
|
||||||
"name": "Stufen-Typ",
|
"name": "Stufen-Typ",
|
||||||
"description": "Wähle, welche Preisstufen-Klassifizierung visualisiert werden soll: 'rating_level' (niedrig/normal/hoch basierend auf deinen konfigurierten Schwellenwerten) oder 'level' (Tibber-API-Stufen: sehr günstig/günstig/normal/teuer/sehr teuer)."
|
"description": "Wähle, welche Preisstufen-Klassifizierung visualisiert werden soll: 'rating_level' (niedrig/normal/hoch basierend auf deinen konfigurierten Schwellenwerten) oder 'level' (Tibber-API-Stufen: sehr günstig/günstig/normal/teuer/sehr teuer)."
|
||||||
},
|
|
||||||
"highlight_best_price": {
|
|
||||||
"name": "Bestpreis-Zeiträume hervorheben",
|
|
||||||
"description": "Füge eine halbtransparente grüne Überlagerung hinzu, um die Bestpreis-Zeiträume im Diagramm hervorzuheben. Dies erleichtert die visuelle Identifizierung der optimalen Zeiten für den Energieverbrauch."
|
|
||||||
},
|
|
||||||
"highlight_peak_price": {
|
|
||||||
"name": "Spitzenpreis-Zeiträume hervorheben",
|
|
||||||
"description": "Füge eine halbtransparente rote Überlagerung hinzu, um die Spitzenpreis-Zeiträume im Diagramm hervorzuheben. Dies erleichtert die visuelle Identifizierung der Zeiten, in denen Energie am teuersten ist."
|
|
||||||
},
|
|
||||||
"resolution": {
|
|
||||||
"name": "Auflösung",
|
|
||||||
"description": "Zeitauflösung für die Diagrammdaten. 'interval' (Standard): Originale 15-Minuten-Intervalle (96 Punkte pro Tag). 'hourly': Aggregierte Stundenwerte mit einem rollierenden 60-Minuten-Fenster (24 Punkte pro Tag) für ein übersichtlicheres Diagramm."
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|
@ -1072,36 +920,36 @@
|
||||||
"description": "Ausgabeformat für die zurückgegebenen Daten. Optionen: 'array_of_objects' (Standard, Array von Objekten mit anpassbaren Feldnamen), 'array_of_arrays' (Array von [Zeitstempel, Preis]-Arrays mit abschließendem Null-Punkt für Stepline-Charts)."
|
"description": "Ausgabeformat für die zurückgegebenen Daten. Optionen: 'array_of_objects' (Standard, Array von Objekten mit anpassbaren Feldnamen), 'array_of_arrays' (Array von [Zeitstempel, Preis]-Arrays mit abschließendem Null-Punkt für Stepline-Charts)."
|
||||||
},
|
},
|
||||||
"array_fields": {
|
"array_fields": {
|
||||||
"name": "Array-Felder",
|
"name": "Array-Felder (nur Array von Arrays)",
|
||||||
"description": "Definiere, welche Felder im array_of_arrays-Format enthalten sein sollen. Verwende Feldnamen in geschweiften Klammern, getrennt durch Kommas. Verfügbare Felder: start_time, price_per_kwh, level, rating_level, average. Felder werden automatisch aktiviert, auch wenn include_*-Optionen nicht gesetzt sind. Leer lassen für Standard (nur Zeitstempel und Preis)."
|
"description": "[NUR FÜR Array von Arrays FORMAT] Definiere, welche Felder im array_of_arrays-Format enthalten sein sollen. Verwende Feldnamen in geschweiften Klammern, getrennt durch Kommas. Verfügbare Felder: start_time, price_per_kwh, level, rating_level, average. Felder werden automatisch aktiviert, auch wenn include_*-Optionen nicht gesetzt sind. Leer lassen für Standard (nur Zeitstempel und Preis)."
|
||||||
},
|
},
|
||||||
"subunit_currency": {
|
"minor_currency": {
|
||||||
"name": "Unterwährungseinheit",
|
"name": "Kleinere Währungseinheit",
|
||||||
"description": "Gibt Preise in Unterwährungseinheiten zurück (Cent für EUR, Øre für NOK/SEK) statt in Basiswährungseinheiten. Standardmäßig deaktiviert."
|
"description": "Gibt Preise in kleineren Währungseinheiten zurück (Cent für EUR, Øre für NOK/SEK) statt in Hauptwährungseinheiten. Standardmäßig deaktiviert."
|
||||||
},
|
},
|
||||||
"round_decimals": {
|
"round_decimals": {
|
||||||
"name": "Dezimalstellen runden",
|
"name": "Dezimalstellen runden",
|
||||||
"description": "Anzahl der Dezimalstellen, auf die Preise gerundet werden sollen (0-10). Falls nicht angegeben, wird die Standardgenauigkeit verwendet (4 Dezimalstellen für Basiswährung, 2 für Unterwährungseinheit)."
|
"description": "Anzahl der Dezimalstellen, auf die Preise gerundet werden sollen (0-10). Falls nicht angegeben, wird die Standardgenauigkeit verwendet (4 Dezimalstellen für Hauptwährung, 2 für kleinere Währungseinheit)."
|
||||||
},
|
},
|
||||||
"include_level": {
|
"include_level": {
|
||||||
"name": "Preisniveau einschließen",
|
"name": "Preisniveau einschließen (nur Array von Objekten)",
|
||||||
"description": "Fügt das Tibber-Preisniveau (sehr günstig/günstig/normal/teuer/sehr teuer) zu jedem Datenpunkt hinzu."
|
"description": "[NUR FÜR Array von Objekten FORMAT] Fügt das Tibber-Preisniveau (VERY_CHEAP, CHEAP, NORMAL, EXPENSIVE, VERY_EXPENSIVE) zu jedem Datenpunkt hinzu."
|
||||||
},
|
},
|
||||||
"include_rating_level": {
|
"include_rating_level": {
|
||||||
"name": "Preisbewertung einschließen",
|
"name": "Preisbewertung einschließen (nur Array von Objekten)",
|
||||||
"description": "Fügt die berechnete Preisbewertung (niedrig/normal/hoch) basierend auf deinen konfigurierten Schwellwerten zu jedem Datenpunkt hinzu."
|
"description": "[NUR FÜR Array von Objekten FORMAT] Fügt die berechnete Preisbewertung (LOW, NORMAL, HIGH) basierend auf deinen konfigurierten Schwellwerten zu jedem Datenpunkt hinzu."
|
||||||
},
|
},
|
||||||
"include_average": {
|
"include_average": {
|
||||||
"name": "Durchschnitt einschließen",
|
"name": "Durchschnitt einschließen (nur Array von Objekten)",
|
||||||
"description": "Den Tagesdurchschnittspreis in jedem Datenpunkt zum Vergleich einschließen."
|
"description": "[NUR FÜR Array von Objekten FORMAT] Den Tagesdurchschnittspreis in jedem Datenpunkt zum Vergleich einschließen."
|
||||||
},
|
},
|
||||||
"level_filter": {
|
"level_filter": {
|
||||||
"name": "Preisniveau-Filter",
|
"name": "Preisniveau-Filter",
|
||||||
"description": "Intervalle filtern, um nur bestimmte Tibber-Preisniveaus einzuschließen (sehr günstig/günstig/normal/teuer/sehr teuer). Falls nicht angegeben, werden alle Niveaus eingeschlossen."
|
"description": "Intervalle filtern, um nur bestimmte Tibber-Preisniveaus einzuschließen (VERY_CHEAP, CHEAP, NORMAL, EXPENSIVE, VERY_EXPENSIVE). Falls nicht angegeben, werden alle Niveaus eingeschlossen."
|
||||||
},
|
},
|
||||||
"rating_level_filter": {
|
"rating_level_filter": {
|
||||||
"name": "Preisbewertungs-Filter",
|
"name": "Preisbewertungs-Filter",
|
||||||
"description": "Intervalle filtern, um nur bestimmte Preisbewertungen einzuschließen (niedrig/normal/hoch). Falls nicht angegeben, werden alle Bewertungen eingeschlossen."
|
"description": "Intervalle filtern, um nur bestimmte Preisbewertungen einzuschließen (LOW, NORMAL, HIGH). Falls nicht angegeben, werden alle Bewertungen eingeschlossen."
|
||||||
},
|
},
|
||||||
"period_filter": {
|
"period_filter": {
|
||||||
"name": "Perioden-Filter",
|
"name": "Perioden-Filter",
|
||||||
|
|
@ -1113,43 +961,39 @@
|
||||||
},
|
},
|
||||||
"connect_segments": {
|
"connect_segments": {
|
||||||
"name": "Segmente verbinden",
|
"name": "Segmente verbinden",
|
||||||
"description": "[NUR BEI 'NULL-Werte einfügen'] Wenn aktiviert, werden an Segmentgrenzen Verbindungspunkte hinzugefügt, um verschiedene Preisstufen-Segmente in Stufenliniendiagrammen visuell zu verbinden. Bei fallendem Preis wird ein Punkt mit dem niedrigeren Preis am Ende des aktuellen Segments hinzugefügt. Bei steigendem Preis wird ein Haltepunkt vor der Lücke hinzugefügt. Dies erzeugt sanfte visuelle Übergänge zwischen Segmenten anstelle von abrupten Lücken."
|
"description": "[NUR MIT insert_nulls='segments'] Wenn aktiviert, werden an Segmentgrenzen Verbindungspunkte hinzugefügt, um verschiedene Preisstufen-Segmente in Stufenliniendiagrammen visuell zu verbinden. Bei fallendem Preis wird ein Punkt mit dem niedrigeren Preis am Ende des aktuellen Segments hinzugefügt. Bei steigendem Preis wird ein Haltepunkt vor der Lücke hinzugefügt. Dies erzeugt sanfte visuelle Übergänge zwischen Segmenten anstelle von abrupten Lücken."
|
||||||
},
|
},
|
||||||
"add_trailing_null": {
|
"add_trailing_null": {
|
||||||
"name": "Abschließenden Null-Punkt hinzufügen",
|
"name": "Abschließenden Null-Punkt hinzufügen",
|
||||||
"description": "Füge einen finalen Datenpunkt mit Nullwerten (außer Zeitstempel) am Ende hinzu. Einige Diagrammbibliotheken benötigen dies, um Extrapolation/Interpolation zum Viewport-Rand bei Verwendung von Stufendarstellung zu verhindern. Deaktiviert lassen, es sei denn, dein Diagramm benötigt es."
|
"description": "[BEIDE FORMATE] Füge einen finalen Datenpunkt mit Nullwerten (außer Zeitstempel) am Ende hinzu. Einige Diagrammbibliotheken benötigen dies, um Extrapolation/Interpolation zum Viewport-Rand bei Verwendung von Stufendarstellung zu verhindern. Deaktiviert lassen, es sei denn, dein Diagramm benötigt es."
|
||||||
},
|
},
|
||||||
"start_time_field": {
|
"start_time_field": {
|
||||||
"name": "Startzeit-Feldname",
|
"name": "Startzeit-Feldname (nur Array von Objekten)",
|
||||||
"description": "Benutzerdefinierter Name für das Startzeit-Feld in der Ausgabe. Standardmäßig 'start_time', wenn nicht angegeben."
|
"description": "[NUR FÜR Array von Objekten FORMAT] Benutzerdefinierter Name für das Startzeit-Feld in der Ausgabe. Standardmäßig 'start_time', wenn nicht angegeben."
|
||||||
},
|
},
|
||||||
"end_time_field": {
|
"end_time_field": {
|
||||||
"name": "Endzeit-Feldname",
|
"name": "Endzeit-Feldname (nur Array von Objekten)",
|
||||||
"description": "Benutzerdefinierter Name für das Endzeit-Feld in der Ausgabe. Standardmäßig 'end_time', wenn nicht angegeben. Nur verwendet mit period_filter."
|
"description": "[NUR FÜR Array von Objekten FORMAT] Benutzerdefinierter Name für das Endzeit-Feld in der Ausgabe. Standardmäßig 'end_time', wenn nicht angegeben. Nur verwendet mit period_filter."
|
||||||
},
|
},
|
||||||
"price_field": {
|
"price_field": {
|
||||||
"name": "Preis-Feldname",
|
"name": "Preis-Feldname (nur Array von Objekten)",
|
||||||
"description": "Benutzerdefinierter Name für das Preis-Feld in der Ausgabe. Standard ist 'price_per_kwh', falls nicht angegeben."
|
"description": "[NUR FÜR Array von Objekten FORMAT] Benutzerdefinierter Name für das Preis-Feld in der Ausgabe. Standard ist 'price_per_kwh', falls nicht angegeben."
|
||||||
},
|
},
|
||||||
"level_field": {
|
"level_field": {
|
||||||
"name": "Preisniveau-Feldname",
|
"name": "Preisniveau-Feldname (nur Array von Objekten)",
|
||||||
"description": "Benutzerdefinierter Name für das Preisniveau-Feld in der Ausgabe. Standard ist 'level', falls nicht angegeben. Wird nur verwendet, wenn include_level aktiviert ist."
|
"description": "[NUR FÜR Array von Objekten FORMAT] Benutzerdefinierter Name für das Preisniveau-Feld in der Ausgabe. Standard ist 'level', falls nicht angegeben. Wird nur verwendet, wenn include_level aktiviert ist."
|
||||||
},
|
},
|
||||||
"rating_level_field": {
|
"rating_level_field": {
|
||||||
"name": "Preisbewertung-Feldname",
|
"name": "Preisbewertung-Feldname (nur Array von Objekten)",
|
||||||
"description": "Benutzerdefinierter Name für das Preisbewertungs-Feld in der Ausgabe. Standard ist 'rating_level', falls nicht angegeben. Wird nur verwendet, wenn include_rating_level aktiviert ist."
|
"description": "[NUR FÜR Array von Objekten FORMAT] Benutzerdefinierter Name für das Preisbewertungs-Feld in der Ausgabe. Standard ist 'rating_level', falls nicht angegeben. Wird nur verwendet, wenn include_rating_level aktiviert ist."
|
||||||
},
|
},
|
||||||
"average_field": {
|
"average_field": {
|
||||||
"name": "Durchschnitts-Feldname",
|
"name": "Durchschnitts-Feldname (nur Array von Objekten)",
|
||||||
"description": "Benutzerdefinierter Name für das Durchschnitts-Feld in der Ausgabe. Standard ist 'average', falls nicht angegeben. Wird nur verwendet, wenn include_average aktiviert ist."
|
"description": "[NUR FÜR Array von Objekten FORMAT] Benutzerdefinierter Name für das Durchschnitts-Feld in der Ausgabe. Standard ist 'average', falls nicht angegeben. Wird nur verwendet, wenn include_average aktiviert ist."
|
||||||
},
|
|
||||||
"metadata": {
|
|
||||||
"name": "Metadaten",
|
|
||||||
"description": "Steuerung der Metadaten-Einbindung in der Antwort. 'include' (Standard): Gibt Chart-Daten und Metadaten mit Preisstatistiken, Währungsinformationen, Y-Achsen-Vorschlägen und Zeitbereich zurück. 'only': Gibt nur Metadaten zurück ohne Chart-Daten zu verarbeiten (schnell, nützlich für dynamische Y-Achsen-Konfiguration). 'none': Gibt nur Chart-Daten ohne Metadaten zurück."
|
|
||||||
},
|
},
|
||||||
"data_key": {
|
"data_key": {
|
||||||
"name": "Daten-Schlüssel",
|
"name": "Daten-Schlüssel (beide Formate)",
|
||||||
"description": "Benutzerdefinierter Name für den obersten Datenschlüssel in der Antwort. Standard ist 'data', falls nicht angegeben."
|
"description": "[BEIDE FORMATE] Benutzerdefinierter Name für den obersten Datenschlüssel in der Antwort. Standard ist 'data', falls nicht angegeben. Für ApexCharts-Kompatibilität mit Array von Arrays verwende 'points'."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|
@ -1174,9 +1018,7 @@
|
||||||
"options": {
|
"options": {
|
||||||
"yesterday": "Gestern",
|
"yesterday": "Gestern",
|
||||||
"today": "Heute",
|
"today": "Heute",
|
||||||
"tomorrow": "Morgen",
|
"tomorrow": "Morgen"
|
||||||
"rolling_window": "Rollierendes Fenster",
|
|
||||||
"rolling_window_autozoom": "Rollierendes Fenster (Auto-Zoom)"
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"resolution": {
|
"resolution": {
|
||||||
|
|
@ -1226,13 +1068,6 @@
|
||||||
"peak_price": "Spitzenpreis-Zeiträume"
|
"peak_price": "Spitzenpreis-Zeiträume"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"metadata": {
|
|
||||||
"options": {
|
|
||||||
"include": "Einbeziehen (Daten + Metadaten)",
|
|
||||||
"only": "Nur Metadaten",
|
|
||||||
"none": "Keine (nur Daten)"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"volatility": {
|
"volatility": {
|
||||||
"options": {
|
"options": {
|
||||||
"low": "Niedrig",
|
"low": "Niedrig",
|
||||||
|
|
@ -1250,18 +1085,6 @@
|
||||||
"expensive": "Teuer",
|
"expensive": "Teuer",
|
||||||
"very_expensive": "Sehr teuer"
|
"very_expensive": "Sehr teuer"
|
||||||
}
|
}
|
||||||
},
|
|
||||||
"currency_display_mode": {
|
|
||||||
"options": {
|
|
||||||
"base": "Basiswährung (€, kr)",
|
|
||||||
"subunit": "Unterwährungseinheit (ct, øre)"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"average_sensor_display": {
|
|
||||||
"options": {
|
|
||||||
"median": "Median",
|
|
||||||
"mean": "Arithmetisches Mittel"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"title": "Tibber Preisinformationen & Bewertungen"
|
"title": "Tibber Preisinformationen & Bewertungen"
|
||||||
|
|
|
||||||
|
|
@ -11,14 +11,14 @@
|
||||||
},
|
},
|
||||||
"new_token": {
|
"new_token": {
|
||||||
"title": "Enter API Token",
|
"title": "Enter API Token",
|
||||||
"description": "Set up Tibber Price Information & Ratings.\n\nTo generate an API access token, visit [{tibber_url}]({tibber_url}).",
|
"description": "Set up Tibber Price Information & Ratings.\n\nTo generate an API access token, visit https://developer.tibber.com.",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API access token"
|
"access_token": "API access token"
|
||||||
},
|
},
|
||||||
"submit": "Validate Token"
|
"submit": "Validate Token"
|
||||||
},
|
},
|
||||||
"user": {
|
"user": {
|
||||||
"description": "Set up Tibber Price Information & Ratings.\n\nTo generate an API access token, visit [{tibber_url}]({tibber_url}).",
|
"description": "Set up Tibber Price Information & Ratings.\n\nTo generate an API access token, visit https://developer.tibber.com.",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API access token"
|
"access_token": "API access token"
|
||||||
},
|
},
|
||||||
|
|
@ -42,7 +42,7 @@
|
||||||
},
|
},
|
||||||
"reauth_confirm": {
|
"reauth_confirm": {
|
||||||
"title": "Reauthenticate Tibber Price Integration",
|
"title": "Reauthenticate Tibber Price Integration",
|
||||||
"description": "The access token for Tibber is no longer valid. Please enter a new API access token to continue using this integration.\n\nTo generate a new API access token, visit [{tibber_url}]({tibber_url}).",
|
"description": "The access token for Tibber is no longer valid. Please enter a new API access token to continue using this integration.\n\nTo generate a new API access token, visit https://developer.tibber.com.",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API access token"
|
"access_token": "API access token"
|
||||||
},
|
},
|
||||||
|
|
@ -77,23 +77,7 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"common": {
|
"common": {
|
||||||
"step_progress": "{step_num} / {total_steps}",
|
"step_progress": "{step_num} / {total_steps}"
|
||||||
"override_warning_template": "⚠️ {fields} controlled by config entity",
|
|
||||||
"override_warning_and": "and",
|
|
||||||
"override_field_label_best_price_min_period_length": "Minimum Period Length",
|
|
||||||
"override_field_label_best_price_max_level_gap_count": "Gap Tolerance",
|
|
||||||
"override_field_label_best_price_flex": "Flexibility",
|
|
||||||
"override_field_label_best_price_min_distance_from_avg": "Minimum Distance",
|
|
||||||
"override_field_label_enable_min_periods_best": "Achieve Minimum Count",
|
|
||||||
"override_field_label_min_periods_best": "Minimum Periods",
|
|
||||||
"override_field_label_relaxation_attempts_best": "Relaxation Attempts",
|
|
||||||
"override_field_label_peak_price_min_period_length": "Minimum Period Length",
|
|
||||||
"override_field_label_peak_price_max_level_gap_count": "Gap Tolerance",
|
|
||||||
"override_field_label_peak_price_flex": "Flexibility",
|
|
||||||
"override_field_label_peak_price_min_distance_from_avg": "Minimum Distance",
|
|
||||||
"override_field_label_enable_min_periods_peak": "Achieve Minimum Count",
|
|
||||||
"override_field_label_min_periods_peak": "Minimum Periods",
|
|
||||||
"override_field_label_relaxation_attempts_peak": "Relaxation Attempts"
|
|
||||||
},
|
},
|
||||||
"config_subentries": {
|
"config_subentries": {
|
||||||
"home": {
|
"home": {
|
||||||
|
|
@ -148,75 +132,38 @@
|
||||||
"options": {
|
"options": {
|
||||||
"step": {
|
"step": {
|
||||||
"init": {
|
"init": {
|
||||||
"menu_options": {
|
|
||||||
"general_settings": "⚙️ General Settings",
|
|
||||||
"display_settings": "💱 Currency Display",
|
|
||||||
"current_interval_price_rating": "📊 Price Rating",
|
|
||||||
"price_level": "🏷️ Price Level",
|
|
||||||
"volatility": "💨 Price Volatility",
|
|
||||||
"best_price": "💚 Best Price Period",
|
|
||||||
"peak_price": "🔴 Peak Price Period",
|
|
||||||
"price_trend": "📈 Price Trend",
|
|
||||||
"chart_data_export": "📊 Chart Data Export Sensor",
|
|
||||||
"reset_to_defaults": "🔄 Reset to Defaults",
|
|
||||||
"finish": "⬅️ Back"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"general_settings": {
|
|
||||||
"title": "⚙️ General Settings",
|
"title": "⚙️ General Settings",
|
||||||
"description": "**Configure general settings for Tibber Price Information & Ratings.**\n\n---\n\n**User:** {user_login}",
|
"description": "_{step_progress}_\n\n**Configure general settings for Tibber Price Information & Ratings.**\n\n---\n\n**User:** {user_login}",
|
||||||
"data": {
|
"data": {
|
||||||
"extended_descriptions": "Extended Descriptions",
|
"extended_descriptions": "Extended Descriptions"
|
||||||
"average_sensor_display": "Average Sensor Display"
|
|
||||||
},
|
},
|
||||||
"data_description": {
|
"data_description": {
|
||||||
"extended_descriptions": "Controls whether entity attributes include detailed explanations and usage tips.\n\n• Disabled (default): Brief description only\n• Enabled: Detailed explanation + practical usage examples\n\nExample:\nDisabled = 1 attribute\nEnabled = 2 additional attributes",
|
"extended_descriptions": "Controls whether entity attributes include detailed explanations and usage tips.\n\n• Disabled (default): Brief description only\n• Enabled: Detailed explanation + practical usage examples\n\nExample:\nDisabled = 1 attribute\nEnabled = 2 additional attributes"
|
||||||
"average_sensor_display": "Choose which statistical measure to display in the sensor state for average price sensors. The other value will be shown as an attribute.\n\n• **Median (default)**: Shows the 'typical' price, resistant to extreme spikes - best for display and human interpretation\n• **Arithmetic Mean**: Shows the true mathematical average including all prices - best when you need exact cost calculations\n\nFor automations, use the attribute `price_mean` or `price_median` to access both values regardless of this setting."
|
|
||||||
},
|
},
|
||||||
"submit": "↩ Save & Back"
|
"submit": "Continue →"
|
||||||
},
|
|
||||||
"display_settings": {
|
|
||||||
"title": "💱 Currency Display Settings",
|
|
||||||
"description": "**Configure how electricity prices are displayed - in base currency (€, kr) or subunit (ct, øre).**\n\n---",
|
|
||||||
"data": {
|
|
||||||
"currency_display_mode": "Display Mode"
|
|
||||||
},
|
|
||||||
"data_description": {
|
|
||||||
"currency_display_mode": "Choose how prices are displayed:\n\n• **Base Currency** (€/kWh, kr/kWh): Decimal values (e.g., 0.25 €/kWh) - differences visible from 3rd-4th decimal place\n• **Subunit Currency** (ct/kWh, øre/kWh): Larger values (e.g., 25.00 ct/kWh) - differences visible from 1st decimal place\n\nDefault depends on your currency:\n• EUR → Subunit (cents) - German/Dutch preference\n• NOK/SEK/DKK → Base (kroner) - Scandinavian preference\n• USD/GBP → Base currency\n\n**💡 Tip:** When selecting Subunit Currency, you can enable the additional \"Current Electricity Price (Energy Dashboard)\" sensor (disabled by default)."
|
|
||||||
},
|
|
||||||
"submit": "↩ Save & Back"
|
|
||||||
},
|
},
|
||||||
"current_interval_price_rating": {
|
"current_interval_price_rating": {
|
||||||
"title": "📊 Price Rating Settings",
|
"title": "📊 Price Rating Thresholds",
|
||||||
"description": "**Configure thresholds and stabilization for price rating levels (low/normal/high) based on comparison with trailing 24-hour average.**{entity_warning}",
|
"description": "_{step_progress}_\n\n**Configure thresholds for price rating levels (low/normal/high) based on comparison with trailing 24-hour average.**\n\n---",
|
||||||
"data": {
|
"sections": {
|
||||||
"price_rating_threshold_low": "Low Threshold",
|
"price_rating_thresholds": {
|
||||||
"price_rating_threshold_high": "High Threshold",
|
"name": "Price Rating Thresholds",
|
||||||
"price_rating_hysteresis": "Hysteresis",
|
"description": "Define price rating classification levels.",
|
||||||
"price_rating_gap_tolerance": "Gap Tolerance"
|
"data": {
|
||||||
|
"price_rating_threshold_low": "Low Threshold",
|
||||||
|
"price_rating_threshold_high": "High Threshold"
|
||||||
|
},
|
||||||
|
"data_description": {
|
||||||
|
"price_rating_threshold_low": "Percentage below the trailing 24-hour average that the current price must be to qualify as 'low' rating. Example: 5 means at least 5% below average. Sensors with this rating indicate favorable time windows. Default: 5%",
|
||||||
|
"price_rating_threshold_high": "Percentage above the trailing 24-hour average that the current price must be to qualify as 'high' rating. Example: 10 means at least 10% above average. Sensors with this rating warn about expensive time windows. Default: 10%"
|
||||||
|
}
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"data_description": {
|
"submit": "Continue →"
|
||||||
"price_rating_threshold_low": "Percentage below the trailing 24-hour average that the current price must be to qualify as 'low' rating. Example: -10 means at least 10% below average. Sensors with this rating indicate favorable time windows. Default: -10%",
|
|
||||||
"price_rating_threshold_high": "Percentage above the trailing 24-hour average that the current price must be to qualify as 'high' rating. Example: 10 means at least 10% above average. Sensors with this rating warn about expensive time windows. Default: 10%",
|
|
||||||
"price_rating_hysteresis": "Percentage band around thresholds to prevent rapid state changes. When the rating is already LOW, the price must rise above (threshold + hysteresis) to switch to NORMAL. Similarly, HIGH requires the price to fall below (threshold - hysteresis) to leave. This provides stability for automations that react to rating changes. Set to 0 to disable. Default: 2%",
|
|
||||||
"price_rating_gap_tolerance": "Maximum number of consecutive intervals that can be 'smoothed out' if they differ from surrounding ratings. Small isolated rating changes are merged into the dominant neighboring block. This provides stability for automations by preventing brief rating spikes from triggering unnecessary actions. Example: 1 means a single 'normal' interval surrounded by 'high' intervals gets corrected to 'high'. Set to 0 to disable. Default: 1"
|
|
||||||
},
|
|
||||||
"submit": "↩ Save & Back"
|
|
||||||
},
|
|
||||||
"price_level": {
|
|
||||||
"title": "🏷️ Price Level Settings",
|
|
||||||
"description": "**Configure stabilization for Tibber's price level classification (very cheap/cheap/normal/expensive/very expensive).**\n\nTibber's API provides a price level field for each interval. This setting smooths out brief fluctuations to prevent automation instability.{entity_warning}",
|
|
||||||
"data": {
|
|
||||||
"price_level_gap_tolerance": "Gap Tolerance"
|
|
||||||
},
|
|
||||||
"data_description": {
|
|
||||||
"price_level_gap_tolerance": "Maximum number of consecutive intervals that can be 'smoothed out' if they differ from surrounding price levels. Small isolated level changes are merged into the dominant neighboring block. Example: 1 means a single 'normal' interval surrounded by 'cheap' intervals gets corrected to 'cheap'. Set to 0 to disable. Default: 1"
|
|
||||||
},
|
|
||||||
"submit": "↩ Save & Back"
|
|
||||||
},
|
},
|
||||||
"best_price": {
|
"best_price": {
|
||||||
"title": "💚 Best Price Period Settings",
|
"title": "💚 Best Price Period Settings",
|
||||||
"description": "**Configure settings for the Best Price Period binary sensor. This sensor is active during periods with the lowest electricity prices.**{entity_warning}{override_warning}\n\n---",
|
"description": "_{step_progress}_\n\n**Configure settings for the Best Price Period binary sensor. This sensor is active during periods with the lowest electricity prices.**\n\n---",
|
||||||
"sections": {
|
"sections": {
|
||||||
"period_settings": {
|
"period_settings": {
|
||||||
"name": "Period Duration & Levels",
|
"name": "Period Duration & Levels",
|
||||||
|
|
@ -228,8 +175,8 @@
|
||||||
},
|
},
|
||||||
"data_description": {
|
"data_description": {
|
||||||
"best_price_min_period_length": "Minimum duration for a period to be considered as 'best price'. Longer periods are more practical for running appliances like dishwashers or heat pumps. Best price periods require 60 minutes minimum (vs. 30 minutes for peak price warnings) because they should provide meaningful time windows for consumption planning, not just brief opportunities.",
|
"best_price_min_period_length": "Minimum duration for a period to be considered as 'best price'. Longer periods are more practical for running appliances like dishwashers or heat pumps. Best price periods require 60 minutes minimum (vs. 30 minutes for peak price warnings) because they should provide meaningful time windows for consumption planning, not just brief opportunities.",
|
||||||
"best_price_max_level": "Only show best price periods if they contain intervals with price levels ≤ selected value. For example, selecting '**Cheap**' means the period must have at least one '**Very cheap**' or '**Cheap**' interval. This ensures 'best price' periods are not just relatively cheap for the day, but actually cheap in absolute terms. Select '**Any**' to show best prices regardless of their absolute price level.",
|
"best_price_max_level": "Only show best price periods if they contain intervals with price levels ≤ selected value. For example, selecting 'Cheap' means the period must have at least one 'VERY_CHEAP' or 'CHEAP' interval. This ensures 'best price' periods are not just relatively cheap for the day, but actually cheap in absolute terms. Select 'Any' to show best prices regardless of their absolute price level.",
|
||||||
"best_price_max_level_gap_count": "Maximum number of consecutive intervals allowed that deviate by exactly one level step from the required level. For example: with '**Cheap**' filter and gap count 1, a sequence '**Cheap**, **Cheap**, **Normal**, **Cheap**' is accepted (**Normal** is one step above **Cheap**). This prevents periods from being split by occasional level deviations. **Note:** Gap tolerance requires periods ≥90 minutes (6 intervals) to detect outliers effectively. Default: 0 (strict filtering, no tolerance)."
|
"best_price_max_level_gap_count": "Maximum number of consecutive intervals allowed that deviate by exactly one level step from the required level. For example: with 'Cheap' filter and gap count 1, a sequence 'CHEAP, CHEAP, NORMAL, CHEAP' is accepted (NORMAL is one step above CHEAP). This prevents periods from being split by occasional level deviations. **Note:** Gap tolerance requires periods ≥90 minutes (6 intervals) to detect outliers effectively. Default: 0 (strict filtering, no tolerance)."
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"flexibility_settings": {
|
"flexibility_settings": {
|
||||||
|
|
@ -259,11 +206,11 @@
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"submit": "↩ Save & Back"
|
"submit": "Continue →"
|
||||||
},
|
},
|
||||||
"peak_price": {
|
"peak_price": {
|
||||||
"title": "🔴 Peak Price Period Settings",
|
"title": "🔴 Peak Price Period Settings",
|
||||||
"description": "**Configure settings for the Peak Price Period binary sensor. This sensor is active during periods with the highest electricity prices.**{entity_warning}{override_warning}\n\n---",
|
"description": "_{step_progress}_\n\n**Configure settings for the Peak Price Period binary sensor. This sensor is active during periods with the highest electricity prices.**\n\n---",
|
||||||
"sections": {
|
"sections": {
|
||||||
"period_settings": {
|
"period_settings": {
|
||||||
"name": "Period Settings",
|
"name": "Period Settings",
|
||||||
|
|
@ -275,8 +222,8 @@
|
||||||
},
|
},
|
||||||
"data_description": {
|
"data_description": {
|
||||||
"peak_price_min_period_length": "Minimum duration for a period to be considered as 'peak price'. Peak price warnings are allowed for shorter periods (30 minutes minimum vs. 60 minutes for best price) because brief expensive spikes are worth alerting about, even if they're too short for consumption planning.",
|
"peak_price_min_period_length": "Minimum duration for a period to be considered as 'peak price'. Peak price warnings are allowed for shorter periods (30 minutes minimum vs. 60 minutes for best price) because brief expensive spikes are worth alerting about, even if they're too short for consumption planning.",
|
||||||
"peak_price_min_level": "Only show peak price periods if they contain intervals with price levels ≥ selected value. For example, selecting '**Expensive**' means the period must have at least one '**Expensive**' or '**Very expensive**' interval. This ensures 'peak price' periods are not just relatively expensive for the day, but actually expensive in absolute terms. Select '**Any**' to show peak prices regardless of their absolute price level.",
|
"peak_price_min_level": "Only show peak price periods if they contain intervals with price levels ≥ selected value. For example, selecting 'Expensive' means the period must have at least one 'EXPENSIVE' or 'VERY_EXPENSIVE' interval. This ensures 'peak price' periods are not just relatively expensive for the day, but actually expensive in absolute terms. Select 'Any' to show peak prices regardless of their absolute price level.",
|
||||||
"peak_price_max_level_gap_count": "Maximum number of consecutive intervals allowed that deviate by exactly one level step from the required level. For example: with '**Expensive**' filter and gap count 2, a sequence '**Expensive**, **Normal**, **Normal**, **Expensive**' is accepted (**Normal** is one step below **Expensive**). This prevents periods from being split by occasional level deviations. **Note:** Gap tolerance requires periods ≥90 minutes (6 intervals) to detect outliers effectively. Default: 0 (strict filtering, no tolerance)."
|
"peak_price_max_level_gap_count": "Maximum number of consecutive intervals allowed that deviate by exactly one level step from the required level. For example: with 'Expensive' filter and gap count 2, a sequence 'EXPENSIVE, NORMAL, NORMAL, EXPENSIVE' is accepted (NORMAL is one step below EXPENSIVE). This prevents periods from being split by occasional level deviations. **Note:** Gap tolerance requires periods ≥90 minutes (6 intervals) to detect outliers effectively. Default: 0 (strict filtering, no tolerance)."
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"flexibility_settings": {
|
"flexibility_settings": {
|
||||||
|
|
@ -306,52 +253,52 @@
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"submit": "↩ Save & Back"
|
"submit": "Continue →"
|
||||||
},
|
},
|
||||||
"price_trend": {
|
"price_trend": {
|
||||||
"title": "📈 Price Trend Thresholds",
|
"title": "📈 Price Trend Thresholds",
|
||||||
"description": "**Configure thresholds for price trend sensors.** These sensors compare current price with the average of the next N hours to determine if prices are rising, falling, or stable.\n\n**5-Level Scale:** Uses strongly_falling (-2), falling (-1), stable (0), rising (+1), strongly_rising (+2) for automation comparisons via trend_value attribute.{entity_warning}",
|
"description": "_{step_progress}_\n\n**Configure thresholds for price trend sensors. These sensors compare current price with the average of the next N hours to determine if prices are rising, falling, or stable.**\n\n---",
|
||||||
"data": {
|
"sections": {
|
||||||
"price_trend_threshold_rising": "Rising Threshold",
|
"price_trend_thresholds": {
|
||||||
"price_trend_threshold_strongly_rising": "Strongly Rising Threshold",
|
"name": "Price Trend Thresholds",
|
||||||
"price_trend_threshold_falling": "Falling Threshold",
|
"description": "Define price trend classification levels.",
|
||||||
"price_trend_threshold_strongly_falling": "Strongly Falling Threshold"
|
"data": {
|
||||||
|
"price_trend_threshold_rising": "Rising Threshold",
|
||||||
|
"price_trend_threshold_falling": "Falling Threshold"
|
||||||
|
},
|
||||||
|
"data_description": {
|
||||||
|
"price_trend_threshold_rising": "Percentage that the average of the next N hours must be above the current price to qualify as 'rising' trend. Example: 5 means average is at least 5% higher → prices will rise. Typical values: 5-15%. Default: 5%",
|
||||||
|
"price_trend_threshold_falling": "Percentage (negative) that the average of the next N hours must be below the current price to qualify as 'falling' trend. Example: -5 means average is at least 5% lower → prices will fall. Typical values: -5 to -15%. Default: -5%"
|
||||||
|
}
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"data_description": {
|
"submit": "Continue →"
|
||||||
"price_trend_threshold_rising": "Percentage that the average of the next N hours must be above the current price to qualify as 'rising' trend. Example: 3 means average is at least 3% higher → prices will rise. Typical values: 3-10%. Default: 3%",
|
|
||||||
"price_trend_threshold_strongly_rising": "Percentage for 'strongly rising' trend. Must be higher than rising threshold. Example: 6 means average is at least 6% higher → prices will rise significantly. Typical values: 6-15%. Default: 6%",
|
|
||||||
"price_trend_threshold_falling": "Percentage (negative) that the average of the next N hours must be below the current price to qualify as 'falling' trend. Example: -3 means average is at least 3% lower → prices will fall. Typical values: -3 to -10%. Default: -3%",
|
|
||||||
"price_trend_threshold_strongly_falling": "Percentage (negative) for 'strongly falling' trend. Must be lower (more negative) than falling threshold. Example: -6 means average is at least 6% lower → prices will fall significantly. Typical values: -6 to -15%. Default: -6%"
|
|
||||||
},
|
|
||||||
"submit": "↩ Save & Back"
|
|
||||||
},
|
|
||||||
"volatility": {
|
|
||||||
"title": "💨 Price Volatility Thresholds",
|
|
||||||
"description": "**Configure thresholds for volatility classification.** Volatility measures relative price variation using the coefficient of variation (CV = standard deviation / mean × 100%). These thresholds are percentage values that work across all price levels.\n\nUsed by:\n• Volatility sensors (classification)\n• Trend sensors (adaptive threshold adjustment: <moderate = more sensitive, ≥high = less sensitive){entity_warning}",
|
|
||||||
"data": {
|
|
||||||
"volatility_threshold_moderate": "Moderate Threshold",
|
|
||||||
"volatility_threshold_high": "High Threshold",
|
|
||||||
"volatility_threshold_very_high": "Very High Threshold"
|
|
||||||
},
|
|
||||||
"data_description": {
|
|
||||||
"volatility_threshold_moderate": "Coefficient of Variation (CV) at which prices are considered 'moderately volatile'. CV = (standard deviation / mean) × 100%. Example: 15 means price fluctuations of ±15% around average. Sensors show this classification, trend sensors become more sensitive. Default: 15%",
|
|
||||||
"volatility_threshold_high": "Coefficient of Variation (CV) at which prices are considered 'highly volatile'. Example: 30 means price fluctuations of ±30% around average. Larger price jumps expected, trend sensors become less sensitive. Default: 30%",
|
|
||||||
"volatility_threshold_very_high": "Coefficient of Variation (CV) at which prices are considered 'very highly volatile'. Example: 50 means extreme price fluctuations of ±50% around average. On such days, strong price spikes are likely. Default: 50%"
|
|
||||||
},
|
|
||||||
"submit": "↩ Save & Back"
|
|
||||||
},
|
},
|
||||||
"chart_data_export": {
|
"chart_data_export": {
|
||||||
"title": "📊 Chart Data Export Sensor",
|
"title": "📊 Chart Data Export Sensor",
|
||||||
"description": "The Chart Data Export Sensor provides price data as sensor attributes.\n\n⚠️ **Note:** This sensor is a legacy feature for compatibility with older tools.\n\n**Recommended for new setups:** Use the `tibber_prices.get_chartdata` **service directly** - it's more flexible, efficient, and the modern Home Assistant approach.\n\n**When this sensor makes sense:**\n\n✅ Your dashboard tool can **only** read attributes (no service calls)\n✅ You need static data that updates automatically\n❌ **Not for automations:** Use `tibber_prices.get_chartdata` directly there - more flexible and efficient!\n\n---\n\n{sensor_status_info}",
|
"description": "_{step_progress}_\n\nThe Chart Data Export Sensor provides price data as sensor attributes.\n\n⚠️ **Note:** This sensor is a legacy feature for compatibility with older tools.\n\n**Recommended for new setups:** Use the `tibber_prices.get_chartdata` **service directly** - it's more flexible, efficient, and the modern Home Assistant approach.\n\n**When this sensor makes sense:**\n\n✅ Your dashboard tool can **only** read attributes (no service calls)\n✅ You need static data that updates automatically\n❌ **Not for automations:** Use `tibber_prices.get_chartdata` directly there - more flexible and efficient!\n\n---\n\n**Enable the sensor:**\n\n1. Open **Settings → Devices & Services → Tibber Prices**\n2. Select your home → Find **'Chart Data Export'** (Diagnostic section)\n3. **Enable the sensor** (disabled by default)\n\n**Configuration (optional):**\n\nDefault settings work out-of-the-box (today+tomorrow, 15-minute intervals, prices only).\n\nFor customization, add to **`configuration.yaml`**:\n\n```yaml\ntibber_prices:\n chart_export:\n day:\n - today\n - tomorrow\n include_level: true\n include_rating_level: true\n```\n\n**All parameters:** See `tibber_prices.get_chartdata` service documentation",
|
||||||
"submit": "↩ Ok & Back"
|
"submit": "Complete ✓"
|
||||||
},
|
},
|
||||||
"reset_to_defaults": {
|
"volatility": {
|
||||||
"title": "🔄 Reset to Defaults",
|
"title": "💨 Price Volatility Thresholds",
|
||||||
"description": "⚠️ **Warning:** This will reset **ALL** settings to factory defaults.\n\n**What will be reset:**\n• All price rating thresholds\n• All volatility thresholds\n• All price trend thresholds\n• All best price period settings\n• All peak price period settings\n• Display settings\n• General settings\n\n**What will NOT be reset:**\n• Your Tibber API token\n• Selected home\n• Currency\n\n**💡 Tip:** This is useful if you want to start fresh after experimenting with settings.",
|
"description": "_{step_progress}_\n\n**Configure thresholds for volatility classification.** Volatility measures relative price variation using the coefficient of variation (CV = standard deviation / mean × 100%). These thresholds are percentage values that work across all price levels.\n\nUsed by:\n• Volatility sensors (classification)\n• Trend sensors (adaptive threshold adjustment: <moderate = more sensitive, ≥high = less sensitive)\n\n---",
|
||||||
"data": {
|
"sections": {
|
||||||
"confirm_reset": "Yes, reset everything to defaults"
|
"volatility_thresholds": {
|
||||||
|
"name": "Volatility Thresholds",
|
||||||
|
"description": "Define price volatility classification levels.",
|
||||||
|
"data": {
|
||||||
|
"volatility_threshold_moderate": "Moderate Threshold",
|
||||||
|
"volatility_threshold_high": "High Threshold",
|
||||||
|
"volatility_threshold_very_high": "Very High Threshold"
|
||||||
|
},
|
||||||
|
"data_description": {
|
||||||
|
"volatility_threshold_moderate": "Coefficient of Variation (CV) at which prices are considered 'moderately volatile'. CV = (standard deviation / mean) × 100%. Example: 15 means price fluctuations of ±15% around average. Sensors show this classification, trend sensors become more sensitive. Default: 15%",
|
||||||
|
"volatility_threshold_high": "Coefficient of Variation (CV) at which prices are considered 'highly volatile'. Example: 30 means price fluctuations of ±30% around average. Larger price jumps expected, trend sensors become less sensitive. Default: 30%",
|
||||||
|
"volatility_threshold_very_high": "Coefficient of Variation (CV) at which prices are considered 'very highly volatile'. Example: 50 means extreme price fluctuations of ±50% around average. On such days, strong price spikes are likely. Default: 50%"
|
||||||
|
}
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"submit": "Reset Now"
|
"submit": "Continue →"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"error": {
|
"error": {
|
||||||
|
|
@ -376,17 +323,10 @@
|
||||||
"invalid_volatility_threshold_very_high": "Very high volatility threshold must be between 35% and 80%",
|
"invalid_volatility_threshold_very_high": "Very high volatility threshold must be between 35% and 80%",
|
||||||
"invalid_volatility_thresholds": "Thresholds must be in ascending order: moderate < high < very high",
|
"invalid_volatility_thresholds": "Thresholds must be in ascending order: moderate < high < very high",
|
||||||
"invalid_price_trend_rising": "Rising trend threshold must be between 1% and 50%",
|
"invalid_price_trend_rising": "Rising trend threshold must be between 1% and 50%",
|
||||||
"invalid_price_trend_falling": "Falling trend threshold must be between -50% and -1%",
|
"invalid_price_trend_falling": "Falling trend threshold must be between -50% and -1%"
|
||||||
"invalid_price_trend_strongly_rising": "Strongly rising trend threshold must be between 2% and 100%",
|
|
||||||
"invalid_price_trend_strongly_falling": "Strongly falling trend threshold must be between -100% and -2%",
|
|
||||||
"invalid_trend_strongly_rising_less_than_rising": "Strongly rising threshold must be greater than rising threshold",
|
|
||||||
"invalid_trend_strongly_falling_greater_than_falling": "Strongly falling threshold must be less (more negative) than falling threshold"
|
|
||||||
},
|
},
|
||||||
"abort": {
|
"abort": {
|
||||||
"entry_not_found": "Tibber configuration entry not found.",
|
"entry_not_found": "Tibber configuration entry not found."
|
||||||
"reset_cancelled": "Reset cancelled. No changes were made to your configuration.",
|
|
||||||
"reset_successful": "✅ All settings have been reset to factory defaults. Your configuration is now like a fresh installation.",
|
|
||||||
"finished": "Configuration completed."
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"entity": {
|
"entity": {
|
||||||
|
|
@ -394,7 +334,7 @@
|
||||||
"current_interval_price": {
|
"current_interval_price": {
|
||||||
"name": "Current Electricity Price"
|
"name": "Current Electricity Price"
|
||||||
},
|
},
|
||||||
"current_interval_price_base": {
|
"current_interval_price_major": {
|
||||||
"name": "Current Electricity Price (Energy Dashboard)"
|
"name": "Current Electricity Price (Energy Dashboard)"
|
||||||
},
|
},
|
||||||
"next_interval_price": {
|
"next_interval_price": {
|
||||||
|
|
@ -616,91 +556,73 @@
|
||||||
"price_trend_1h": {
|
"price_trend_1h": {
|
||||||
"name": "Price Trend (1h)",
|
"name": "Price Trend (1h)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Strongly Rising",
|
|
||||||
"rising": "Rising",
|
"rising": "Rising",
|
||||||
"stable": "Stable",
|
|
||||||
"falling": "Falling",
|
"falling": "Falling",
|
||||||
"strongly_falling": "Strongly Falling"
|
"stable": "Stable"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_2h": {
|
"price_trend_2h": {
|
||||||
"name": "Price Trend (2h)",
|
"name": "Price Trend (2h)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Strongly Rising",
|
|
||||||
"rising": "Rising",
|
"rising": "Rising",
|
||||||
"stable": "Stable",
|
|
||||||
"falling": "Falling",
|
"falling": "Falling",
|
||||||
"strongly_falling": "Strongly Falling"
|
"stable": "Stable"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_3h": {
|
"price_trend_3h": {
|
||||||
"name": "Price Trend (3h)",
|
"name": "Price Trend (3h)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Strongly Rising",
|
|
||||||
"rising": "Rising",
|
"rising": "Rising",
|
||||||
"stable": "Stable",
|
|
||||||
"falling": "Falling",
|
"falling": "Falling",
|
||||||
"strongly_falling": "Strongly Falling"
|
"stable": "Stable"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_4h": {
|
"price_trend_4h": {
|
||||||
"name": "Price Trend (4h)",
|
"name": "Price Trend (4h)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Strongly Rising",
|
|
||||||
"rising": "Rising",
|
"rising": "Rising",
|
||||||
"stable": "Stable",
|
|
||||||
"falling": "Falling",
|
"falling": "Falling",
|
||||||
"strongly_falling": "Strongly Falling"
|
"stable": "Stable"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_5h": {
|
"price_trend_5h": {
|
||||||
"name": "Price Trend (5h)",
|
"name": "Price Trend (5h)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Strongly Rising",
|
|
||||||
"rising": "Rising",
|
"rising": "Rising",
|
||||||
"stable": "Stable",
|
|
||||||
"falling": "Falling",
|
"falling": "Falling",
|
||||||
"strongly_falling": "Strongly Falling"
|
"stable": "Stable"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_6h": {
|
"price_trend_6h": {
|
||||||
"name": "Price Trend (6h)",
|
"name": "Price Trend (6h)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Strongly Rising",
|
|
||||||
"rising": "Rising",
|
"rising": "Rising",
|
||||||
"stable": "Stable",
|
|
||||||
"falling": "Falling",
|
"falling": "Falling",
|
||||||
"strongly_falling": "Strongly Falling"
|
"stable": "Stable"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_8h": {
|
"price_trend_8h": {
|
||||||
"name": "Price Trend (8h)",
|
"name": "Price Trend (8h)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Strongly Rising",
|
|
||||||
"rising": "Rising",
|
"rising": "Rising",
|
||||||
"stable": "Stable",
|
|
||||||
"falling": "Falling",
|
"falling": "Falling",
|
||||||
"strongly_falling": "Strongly Falling"
|
"stable": "Stable"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_12h": {
|
"price_trend_12h": {
|
||||||
"name": "Price Trend (12h)",
|
"name": "Price Trend (12h)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Strongly Rising",
|
|
||||||
"rising": "Rising",
|
"rising": "Rising",
|
||||||
"stable": "Stable",
|
|
||||||
"falling": "Falling",
|
"falling": "Falling",
|
||||||
"strongly_falling": "Strongly Falling"
|
"stable": "Stable"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"current_price_trend": {
|
"current_price_trend": {
|
||||||
"name": "Current Price Trend",
|
"name": "Current Price Trend",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Strongly Rising",
|
|
||||||
"rising": "Rising",
|
"rising": "Rising",
|
||||||
"stable": "Stable",
|
|
||||||
"falling": "Falling",
|
"falling": "Falling",
|
||||||
"strongly_falling": "Strongly Falling"
|
"stable": "Stable"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"next_price_trend_change": {
|
"next_price_trend_change": {
|
||||||
|
|
@ -873,14 +795,6 @@
|
||||||
"ready": "Ready",
|
"ready": "Ready",
|
||||||
"error": "Error"
|
"error": "Error"
|
||||||
}
|
}
|
||||||
},
|
|
||||||
"chart_metadata": {
|
|
||||||
"name": "Chart Metadata",
|
|
||||||
"state": {
|
|
||||||
"pending": "Pending",
|
|
||||||
"ready": "Ready",
|
|
||||||
"error": "Error"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"binary_sensor": {
|
"binary_sensor": {
|
||||||
|
|
@ -902,52 +816,6 @@
|
||||||
"realtime_consumption_enabled": {
|
"realtime_consumption_enabled": {
|
||||||
"name": "Realtime Consumption Enabled"
|
"name": "Realtime Consumption Enabled"
|
||||||
}
|
}
|
||||||
},
|
|
||||||
"number": {
|
|
||||||
"best_price_flex_override": {
|
|
||||||
"name": "Best Price: Flexibility"
|
|
||||||
},
|
|
||||||
"best_price_min_distance_override": {
|
|
||||||
"name": "Best Price: Minimum Distance"
|
|
||||||
},
|
|
||||||
"best_price_min_period_length_override": {
|
|
||||||
"name": "Best Price: Minimum Period Length"
|
|
||||||
},
|
|
||||||
"best_price_min_periods_override": {
|
|
||||||
"name": "Best Price: Minimum Periods"
|
|
||||||
},
|
|
||||||
"best_price_relaxation_attempts_override": {
|
|
||||||
"name": "Best Price: Relaxation Attempts"
|
|
||||||
},
|
|
||||||
"best_price_gap_count_override": {
|
|
||||||
"name": "Best Price: Gap Tolerance"
|
|
||||||
},
|
|
||||||
"peak_price_flex_override": {
|
|
||||||
"name": "Peak Price: Flexibility"
|
|
||||||
},
|
|
||||||
"peak_price_min_distance_override": {
|
|
||||||
"name": "Peak Price: Minimum Distance"
|
|
||||||
},
|
|
||||||
"peak_price_min_period_length_override": {
|
|
||||||
"name": "Peak Price: Minimum Period Length"
|
|
||||||
},
|
|
||||||
"peak_price_min_periods_override": {
|
|
||||||
"name": "Peak Price: Minimum Periods"
|
|
||||||
},
|
|
||||||
"peak_price_relaxation_attempts_override": {
|
|
||||||
"name": "Peak Price: Relaxation Attempts"
|
|
||||||
},
|
|
||||||
"peak_price_gap_count_override": {
|
|
||||||
"name": "Peak Price: Gap Tolerance"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"switch": {
|
|
||||||
"best_price_enable_relaxation_override": {
|
|
||||||
"name": "Best Price: Achieve Minimum Count"
|
|
||||||
},
|
|
||||||
"peak_price_enable_relaxation_override": {
|
|
||||||
"name": "Peak Price: Achieve Minimum Count"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"issues": {
|
"issues": {
|
||||||
|
|
@ -958,18 +826,6 @@
|
||||||
"homes_removed": {
|
"homes_removed": {
|
||||||
"title": "Tibber homes removed",
|
"title": "Tibber homes removed",
|
||||||
"description": "We detected that {count} home(s) have been removed from your Tibber account: {homes}. Please review your Tibber integration configuration."
|
"description": "We detected that {count} home(s) have been removed from your Tibber account: {homes}. Please review your Tibber integration configuration."
|
||||||
},
|
|
||||||
"tomorrow_data_missing": {
|
|
||||||
"title": "Tomorrow's price data missing for {home_name}",
|
|
||||||
"description": "Tomorrow's electricity price data is still unavailable after {warning_hour}:00. This is unusual, as Tibber typically publishes tomorrow's prices in the afternoon (around 13:00-14:00 CET).\n\nPossible causes:\n- Tibber has not yet published tomorrow's prices\n- Temporary API issues\n- Your electricity provider has not submitted prices to Tibber\n\nThis issue will automatically resolve once tomorrow's data becomes available. If this persists beyond 20:00, please check the Tibber app or contact Tibber support."
|
|
||||||
},
|
|
||||||
"rate_limit_exceeded": {
|
|
||||||
"title": "API rate limit exceeded for {home_name}",
|
|
||||||
"description": "The Tibber API has rate-limited this integration after {error_count} consecutive errors. This means requests are being made too frequently.\n\nThe integration will automatically retry with increasing delays. This issue will resolve once the rate limit expires.\n\nIf this persists for several hours, consider:\n- Checking if multiple Home Assistant instances are using the same API token\n- Verifying no other applications are heavily using your Tibber API token\n- Reducing the update frequency if you've customized it"
|
|
||||||
},
|
|
||||||
"home_not_found": {
|
|
||||||
"title": "Home {home_name} not found in Tibber account",
|
|
||||||
"description": "The home configured in this integration (entry ID: {entry_id}) is no longer available in your Tibber account. This typically happens when:\n- The home was deleted from your Tibber account\n- The home was moved to a different Tibber account\n- Access to this home was revoked\n\nPlease remove this integration entry and re-add it if the home should still be monitored. To remove this entry, go to Settings → Devices & Services → Tibber Prices and delete the {home_name} configuration."
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"services": {
|
"services": {
|
||||||
|
|
@ -1001,23 +857,11 @@
|
||||||
},
|
},
|
||||||
"day": {
|
"day": {
|
||||||
"name": "Day",
|
"name": "Day",
|
||||||
"description": "Which day to visualize (default: Rolling Window). Fixed day options (Yesterday/Today/Tomorrow) show 24h spans without additional dependencies. Dynamic options require config-template-card: Rolling Window displays a fixed 48h window that automatically shifts between yesterday+today and today+tomorrow based on data availability. Rolling Window (Auto-Zoom) behaves the same but additionally auto-zooms in (2h lookback + remaining time until midnight, graph_span decreases every 15 minutes)."
|
"description": "Which day to visualize (yesterday, today, or tomorrow). If not specified, returns a rolling 2-day window: today+tomorrow (when tomorrow data is available) or yesterday+today (when tomorrow data is not yet available)."
|
||||||
},
|
},
|
||||||
"level_type": {
|
"level_type": {
|
||||||
"name": "Level Type",
|
"name": "Level Type",
|
||||||
"description": "Select which price level classification to visualize: 'rating_level' (low/normal/high based on your configured thresholds) or 'level' (Tibber API levels: very cheap/cheap/normal/expensive/very expensive)."
|
"description": "Select which price level classification to visualize: 'rating_level' (low/normal/high based on your configured thresholds) or 'level' (Tibber API levels: very cheap/cheap/normal/expensive/very expensive)."
|
||||||
},
|
|
||||||
"highlight_best_price": {
|
|
||||||
"name": "Highlight Best Price Periods",
|
|
||||||
"description": "Add a semi-transparent green overlay to highlight the best price periods on the chart. This makes it easy to visually identify the optimal times for energy consumption."
|
|
||||||
},
|
|
||||||
"highlight_peak_price": {
|
|
||||||
"name": "Highlight Peak Price Periods",
|
|
||||||
"description": "Add a semi-transparent red overlay to highlight the peak price periods on the chart. This makes it easy to visually identify times when energy is most expensive."
|
|
||||||
},
|
|
||||||
"resolution": {
|
|
||||||
"name": "Resolution",
|
|
||||||
"description": "Time resolution for the chart data. 'interval' (default): Original 15-minute intervals (96 points per day). 'hourly': Aggregated hourly values using a rolling 60-minute window (24 points per day) for a cleaner, less cluttered chart."
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|
@ -1075,25 +919,25 @@
|
||||||
"name": "Array Fields",
|
"name": "Array Fields",
|
||||||
"description": "Define which fields to include. Use field names in curly braces, separated by commas. Available fields: start_time, price_per_kwh, level, rating_level, average. Fields will be automatically enabled even if include_* options are not set. Leave empty for default (timestamp and price only)."
|
"description": "Define which fields to include. Use field names in curly braces, separated by commas. Available fields: start_time, price_per_kwh, level, rating_level, average. Fields will be automatically enabled even if include_* options are not set. Leave empty for default (timestamp and price only)."
|
||||||
},
|
},
|
||||||
"subunit_currency": {
|
"minor_currency": {
|
||||||
"name": "Subunit Currency",
|
"name": "Minor Currency",
|
||||||
"description": "Return prices in subunit currency units (cents for EUR, øre for NOK/SEK) instead of base currency units. Disabled by default."
|
"description": "Return prices in minor currency units (cents for EUR, øre for NOK/SEK) instead of major currency units. Disabled by default."
|
||||||
},
|
},
|
||||||
"round_decimals": {
|
"round_decimals": {
|
||||||
"name": "Round Decimals",
|
"name": "Round Decimals",
|
||||||
"description": "Number of decimal places to round prices to (0-10). If not specified, uses default precision (4 decimals for base currency, 2 for subunit currency)."
|
"description": "Number of decimal places to round prices to (0-10). If not specified, uses default precision (4 decimals for major currency, 2 for minor currency)."
|
||||||
},
|
},
|
||||||
"data_key": {
|
"data_key": {
|
||||||
"name": "Data Key",
|
"name": "Data Key",
|
||||||
"description": "Custom name for the top-level data key in the response. Defaults to 'data' if not specified."
|
"description": "Custom name for the top-level data key in the response. Defaults to 'data' if not specified. For ApexCharts compatibility with Array of Arrays, use 'points'."
|
||||||
},
|
},
|
||||||
"include_level": {
|
"include_level": {
|
||||||
"name": "Include Level",
|
"name": "Include Level",
|
||||||
"description": "Include the Tibber price level field (very cheap/cheap/normal/expensive/very expensive) in each data point."
|
"description": "Include the Tibber price level field (VERY_CHEAP, CHEAP, NORMAL, EXPENSIVE, VERY_EXPENSIVE) in each data point."
|
||||||
},
|
},
|
||||||
"include_rating_level": {
|
"include_rating_level": {
|
||||||
"name": "Include Rating Level",
|
"name": "Include Rating Level",
|
||||||
"description": "Include the calculated rating level field (low/normal/high) based on your configured thresholds in each data point."
|
"description": "Include the calculated rating level field (LOW, NORMAL, HIGH) based on your configured thresholds in each data point."
|
||||||
},
|
},
|
||||||
"include_average": {
|
"include_average": {
|
||||||
"name": "Include Average",
|
"name": "Include Average",
|
||||||
|
|
@ -1101,11 +945,11 @@
|
||||||
},
|
},
|
||||||
"level_filter": {
|
"level_filter": {
|
||||||
"name": "Level Filter",
|
"name": "Level Filter",
|
||||||
"description": "Filter intervals to include only specific Tibber price levels (very cheap/cheap/normal/expensive/very expensive). If not specified, all levels are included."
|
"description": "Filter intervals to include only specific Tibber price levels (VERY_CHEAP, CHEAP, NORMAL, EXPENSIVE, VERY_EXPENSIVE). If not specified, all levels are included."
|
||||||
},
|
},
|
||||||
"rating_level_filter": {
|
"rating_level_filter": {
|
||||||
"name": "Rating Level Filter",
|
"name": "Rating Level Filter",
|
||||||
"description": "Filter intervals to include only specific rating levels (low/normal/high). If not specified, all rating levels are included."
|
"description": "Filter intervals to include only specific rating levels (LOW, NORMAL, HIGH). If not specified, all rating levels are included."
|
||||||
},
|
},
|
||||||
"period_filter": {
|
"period_filter": {
|
||||||
"name": "Period Filter",
|
"name": "Period Filter",
|
||||||
|
|
@ -1117,7 +961,7 @@
|
||||||
},
|
},
|
||||||
"connect_segments": {
|
"connect_segments": {
|
||||||
"name": "Connect Segments",
|
"name": "Connect Segments",
|
||||||
"description": "[ONLY WITH 'Insert NULL Values'] When enabled, adds connecting points at segment boundaries to visually connect different price level segments in stepline charts. When price goes DOWN at a boundary, adds a point with the lower price at the end of the current segment. When price goes UP, adds a hold point before the gap. This creates smooth visual transitions between segments instead of abrupt gaps."
|
"description": "[ONLY WITH insert_nulls='segments'] When enabled, adds connecting points at segment boundaries to visually connect different price level segments in stepline charts. When price goes DOWN at a boundary, adds a point with the lower price at the end of the current segment. When price goes UP, adds a hold point before the gap. This creates smooth visual transitions between segments instead of abrupt gaps."
|
||||||
},
|
},
|
||||||
"add_trailing_null": {
|
"add_trailing_null": {
|
||||||
"name": "Add Trailing Null Point",
|
"name": "Add Trailing Null Point",
|
||||||
|
|
@ -1146,10 +990,6 @@
|
||||||
"average_field": {
|
"average_field": {
|
||||||
"name": "Average Field Name",
|
"name": "Average Field Name",
|
||||||
"description": "Custom name for the average field in the output. Defaults to 'average' if not specified. Only used when include_average is enabled."
|
"description": "Custom name for the average field in the output. Defaults to 'average' if not specified. Only used when include_average is enabled."
|
||||||
},
|
|
||||||
"metadata": {
|
|
||||||
"name": "Metadata",
|
|
||||||
"description": "Control metadata inclusion in the response. 'include' (default): Returns both chart data and metadata with price statistics, currency info, Y-axis suggestions, and time range. 'only': Returns only metadata without processing chart data (fast, useful for dynamic Y-axis configuration). 'none': Returns only chart data without metadata."
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|
@ -1162,16 +1002,6 @@
|
||||||
"description": "The config entry ID for the Tibber integration."
|
"description": "The config entry ID for the Tibber integration."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
|
||||||
"debug_clear_tomorrow": {
|
|
||||||
"name": "Debug: Clear Tomorrow Data",
|
|
||||||
"description": "DEBUG/TESTING: Removes tomorrow's price data from the interval pool cache. Use this to test the tomorrow data refresh cycle without waiting for the next day. After calling this service, the lifecycle sensor will show 'searching_tomorrow' (after 13:00) and the next Timer #1 cycle will fetch new data from the API.",
|
|
||||||
"fields": {
|
|
||||||
"entry_id": {
|
|
||||||
"name": "Entry ID",
|
|
||||||
"description": "Optional config entry ID. If not provided, uses the first available entry."
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"selector": {
|
"selector": {
|
||||||
|
|
@ -1184,9 +1014,7 @@
|
||||||
"options": {
|
"options": {
|
||||||
"yesterday": "Yesterday",
|
"yesterday": "Yesterday",
|
||||||
"today": "Today",
|
"today": "Today",
|
||||||
"tomorrow": "Tomorrow",
|
"tomorrow": "Tomorrow"
|
||||||
"rolling_window": "Rolling Window",
|
|
||||||
"rolling_window_autozoom": "Rolling Window (Auto-Zoom)"
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"resolution": {
|
"resolution": {
|
||||||
|
|
@ -1236,13 +1064,6 @@
|
||||||
"peak_price": "Peak Price Periods"
|
"peak_price": "Peak Price Periods"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"metadata": {
|
|
||||||
"options": {
|
|
||||||
"include": "Include (data + metadata)",
|
|
||||||
"only": "Only metadata",
|
|
||||||
"none": "None (data only)"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"volatility": {
|
"volatility": {
|
||||||
"options": {
|
"options": {
|
||||||
"low": "Low",
|
"low": "Low",
|
||||||
|
|
@ -1260,18 +1081,6 @@
|
||||||
"expensive": "Expensive",
|
"expensive": "Expensive",
|
||||||
"very_expensive": "Very expensive"
|
"very_expensive": "Very expensive"
|
||||||
}
|
}
|
||||||
},
|
|
||||||
"currency_display_mode": {
|
|
||||||
"options": {
|
|
||||||
"base": "Base Currency (€, kr)",
|
|
||||||
"subunit": "Subunit Currency (ct, øre)"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"average_sensor_display": {
|
|
||||||
"options": {
|
|
||||||
"median": "Median",
|
|
||||||
"mean": "Arithmetic Mean"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"title": "Tibber Price Information & Ratings"
|
"title": "Tibber Price Information & Ratings"
|
||||||
|
|
|
||||||
|
|
@ -11,14 +11,14 @@
|
||||||
},
|
},
|
||||||
"new_token": {
|
"new_token": {
|
||||||
"title": "Skriv inn API-token",
|
"title": "Skriv inn API-token",
|
||||||
"description": "Sett opp Tibber Prisinformasjon & Vurderinger.\n\nFor å generere et API-tilgangstoken, besøk [{tibber_url}]({tibber_url}).",
|
"description": "Sett opp Tibber Prisinformasjon & Vurderinger.\n\nFor å generere et API-tilgangstoken, besøk https://developer.tibber.com.",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API-tilgangstoken"
|
"access_token": "API-tilgangstoken"
|
||||||
},
|
},
|
||||||
"submit": "Valider token"
|
"submit": "Valider token"
|
||||||
},
|
},
|
||||||
"user": {
|
"user": {
|
||||||
"description": "Sett opp Tibber Prisinformasjon & Vurderinger.\n\nFor å generere et API-tilgangstoken, besøk [{tibber_url}]({tibber_url}).",
|
"description": "Sett opp Tibber Prisinformasjon & Vurderinger.\n\nFor å generere et API-tilgangstoken, besøk https://developer.tibber.com.",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API-tilgangstoken"
|
"access_token": "API-tilgangstoken"
|
||||||
},
|
},
|
||||||
|
|
@ -42,7 +42,7 @@
|
||||||
},
|
},
|
||||||
"reauth_confirm": {
|
"reauth_confirm": {
|
||||||
"title": "Autentiser Tibber Prisintegrasjonen på nytt",
|
"title": "Autentiser Tibber Prisintegrasjonen på nytt",
|
||||||
"description": "Tilgangstokenet for Tibber er ikke lenger gyldig. Vennligst oppgi et nytt API-tilgangstoken for å fortsette å bruke denne integrasjonen.\n\nFor å generere et nytt API-tilgangstoken, besøk [{tibber_url}]({tibber_url}).",
|
"description": "Tilgangstokenet for Tibber er ikke lenger gyldig. Vennligst oppgi et nytt API-tilgangstoken for å fortsette å bruke denne integrasjonen.\n\nFor å generere et nytt API-tilgangstoken, besøk https://developer.tibber.com.",
|
||||||
"data": {
|
"data": {
|
||||||
"access_token": "API-tilgangstoken"
|
"access_token": "API-tilgangstoken"
|
||||||
},
|
},
|
||||||
|
|
@ -77,23 +77,7 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"common": {
|
"common": {
|
||||||
"step_progress": "{step_num} / {total_steps}",
|
"step_progress": "{step_num} / {total_steps}"
|
||||||
"override_warning_template": "⚠️ {fields} styres av konfigurasjons-entitet",
|
|
||||||
"override_warning_and": "og",
|
|
||||||
"override_field_label_best_price_min_period_length": "Minste periodelengde",
|
|
||||||
"override_field_label_best_price_max_level_gap_count": "Gaptoleranse",
|
|
||||||
"override_field_label_best_price_flex": "Fleksibilitet",
|
|
||||||
"override_field_label_best_price_min_distance_from_avg": "Minimumsavstand",
|
|
||||||
"override_field_label_enable_min_periods_best": "Oppnå minimum antall",
|
|
||||||
"override_field_label_min_periods_best": "Minimumperioder",
|
|
||||||
"override_field_label_relaxation_attempts_best": "Avslapningsforsøk",
|
|
||||||
"override_field_label_peak_price_min_period_length": "Minste periodelengde",
|
|
||||||
"override_field_label_peak_price_max_level_gap_count": "Gaptoleranse",
|
|
||||||
"override_field_label_peak_price_flex": "Fleksibilitet",
|
|
||||||
"override_field_label_peak_price_min_distance_from_avg": "Minimumsavstand",
|
|
||||||
"override_field_label_enable_min_periods_peak": "Oppnå minimum antall",
|
|
||||||
"override_field_label_min_periods_peak": "Minimumperioder",
|
|
||||||
"override_field_label_relaxation_attempts_peak": "Avslapningsforsøk"
|
|
||||||
},
|
},
|
||||||
"config_subentries": {
|
"config_subentries": {
|
||||||
"home": {
|
"home": {
|
||||||
|
|
@ -148,111 +132,85 @@
|
||||||
"options": {
|
"options": {
|
||||||
"step": {
|
"step": {
|
||||||
"init": {
|
"init": {
|
||||||
"menu_options": {
|
|
||||||
"general_settings": "⚙️ Generelle innstillinger",
|
|
||||||
"display_settings": "💱 Valutavisning",
|
|
||||||
"current_interval_price_rating": "📊 Prisvurdering",
|
|
||||||
"price_level": "🏷️ Prisnivå",
|
|
||||||
"volatility": "💨 Prisvolatilitet",
|
|
||||||
"best_price": "💚 Beste prisperiode",
|
|
||||||
"peak_price": "🔴 Toppprisperiode",
|
|
||||||
"price_trend": "📈 Pristrend",
|
|
||||||
"chart_data_export": "📊 Diagramdata-eksportsensor",
|
|
||||||
"reset_to_defaults": "🔄 Tilbakestill til standard",
|
|
||||||
"finish": "⬅️ Tilbake"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"general_settings": {
|
|
||||||
"title": "⚙️ Generelle innstillinger",
|
"title": "⚙️ Generelle innstillinger",
|
||||||
"description": "**Konfigurer generelle innstillinger for Tibber prisinformasjon og vurderinger.**\n\n---\n\n**Bruker:** {user_login}",
|
"description": "_{step_progress}_\n\n**Konfigurer generelle innstillinger for Tibber prisinformasjon og vurderinger.**\n\n---\n\n**Bruker:** {user_login}",
|
||||||
"data": {
|
"data": {
|
||||||
"extended_descriptions": "Utvidede beskrivelser",
|
"extended_descriptions": "Utvidede beskrivelser"
|
||||||
"average_sensor_display": "Gjennomsnittssensor-visning"
|
|
||||||
},
|
},
|
||||||
"data_description": {
|
"data_description": {
|
||||||
"extended_descriptions": "Styrer om entitetsattributter inkluderer detaljerte forklaringer og brukstips.\n\n• Deaktivert (standard): Bare kort beskrivelse\n• Aktivert: Detaljert forklaring + praktiske brukseksempler\n\nEksempel:\nDeaktivert = 1 attributt\nAktivert = 2 ekstra attributter",
|
"extended_descriptions": "Styrer om entitetsattributter inkluderer detaljerte forklaringer og brukstips.\n\n• Deaktivert (standard): Bare kort beskrivelse\n• Aktivert: Detaljert forklaring + praktiske brukseksempler\n\nEksempel:\nDeaktivert = 1 attributt\nAktivert = 2 ekstra attributter"
|
||||||
"average_sensor_display": "Velg hvilket statistisk mål som skal vises i sensortilstanden for gjennomsnittspris-sensorer. Den andre verdien vises som attributt.\n\n• **Median (standard)**: Viser den 'typiske' prisen, motstandsdyktig mot ekstreme topper - best for visning og menneskelig tolkning\n• **Aritmetisk gjennomsnitt**: Viser det sanne matematiske gjennomsnittet inkludert alle priser - best når du trenger eksakte kostnadsberegninger\n\nFor automatiseringer, bruk attributtet `price_mean` eller `price_median` for å få tilgang til begge verdier uavhengig av denne innstillingen."
|
|
||||||
},
|
},
|
||||||
"submit": "↩ Lagre & tilbake"
|
"submit": "Videre til trinn 2"
|
||||||
},
|
|
||||||
"display_settings": {
|
|
||||||
"title": "💱 Valutavisningsinnstillinger",
|
|
||||||
"description": "_{step_progress}_\n\n**Konfigurer hvordan strømpriser vises - i basisvaluta (€, kr) eller underenhet (ct, øre).**\n\n---",
|
|
||||||
"data": {
|
|
||||||
"currency_display_mode": "Visningsmodus"
|
|
||||||
},
|
|
||||||
"data_description": {
|
|
||||||
"currency_display_mode": "Velg hvordan priser vises:\n\n• **Basisvaluta** (€/kWh, kr/kWh): Desimalverdier (f.eks. 0,25 €/kWh) - forskjeller synlige fra 3.-4. desimalplass\n• **Underenhet** (ct/kWh, øre/kWh): Større verdier (f.eks. 25,00 ct/kWh) - forskjeller allerede synlige fra 1. desimalplass\n\nStandard avhenger av valutaen din:\n• EUR → Underenhet (cent) - tysk/nederlandsk preferanse\n• NOK/SEK/DKK → Basisvaluta (kroner) - skandinavisk preferanse\n• USD/GBP → Basisvaluta\n\n**💡 Tips:** Ved valg av underenhet kan du aktivere den ekstra sensoren \"Nåværende strømpris (Energi-dashboard)\" (deaktivert som standard)."
|
|
||||||
},
|
|
||||||
"submit": "↩ Lagre & tilbake"
|
|
||||||
},
|
},
|
||||||
"current_interval_price_rating": {
|
"current_interval_price_rating": {
|
||||||
"title": "📊 Prisvurderingsinnstillinger",
|
"title": "📊 Prisvurderings-terskler",
|
||||||
"description": "**Konfigurer terskler og stabilisering for prisvurderingsnivåer (lav/normal/høy) basert på sammenligning med etterfølgende 24-timers gjennomsnitt.**{entity_warning}",
|
"description": "_{step_progress}_\n\n**Konfigurer terskler for prisvurderingsnivåer (lav/normal/høy) basert på sammenligning med etterfølgende 24-timers gjennomsnitt.**\n\n---",
|
||||||
"data": {
|
|
||||||
"price_rating_threshold_low": "Lav-terskel",
|
|
||||||
"price_rating_threshold_high": "Høy-terskel",
|
|
||||||
"price_rating_hysteresis": "Hysterese",
|
|
||||||
"price_rating_gap_tolerance": "Gap-toleranse"
|
|
||||||
},
|
|
||||||
"data_description": {
|
|
||||||
"price_rating_threshold_low": "Prosentverdi for hvor mye gjeldende pris må være under det etterfølgende 24-timers gjennomsnittet for å kvalifisere som 'lav' vurdering. Eksempel: -10 betyr minst 10% under gjennomsnitt. Sensorer med denne vurderingen indikerer gunstige tidsvinduer. Standard: -10%",
|
|
||||||
"price_rating_threshold_high": "Prosentverdi for hvor mye gjeldende pris må være over det etterfølgende 24-timers gjennomsnittet for å kvalifisere som 'høy' vurdering. Eksempel: 10 betyr minst 10% over gjennomsnitt. Sensorer med denne vurderingen advarer om dyre tidsvinduer. Standard: 10%",
|
|
||||||
"price_rating_hysteresis": "Prosentbånd rundt terskler for å unngå raske tilstandsendringer. Når vurderingen allerede er LAV, må prisen stige over (terskel + hysterese) for å bytte til NORMAL. Tilsvarende krever HØY at prisen faller under (terskel - hysterese) for å forlate tilstanden. Dette gir stabilitet for automatiseringer som reagerer på vurderingsendringer. Sett til 0 for å deaktivere. Standard: 2%",
|
|
||||||
"price_rating_gap_tolerance": "Maksimalt antall påfølgende intervaller som kan 'jevnes ut' hvis de avviker fra omkringliggende vurderinger. Små isolerte vurderingsendringer slås sammen med den dominerende nabogruppen. Dette gir stabilitet for automatiseringer ved å forhindre at korte vurderingstopper utløser unødvendige handlinger. Eksempel: 1 betyr at et enkelt 'normal'-intervall omgitt av 'høy'-intervaller korrigeres til 'høy'. Sett til 0 for å deaktivere. Standard: 1"
|
|
||||||
},
|
|
||||||
"submit": "↩ Lagre & tilbake"
|
|
||||||
},
|
|
||||||
"best_price": {
|
|
||||||
"title": "💚 Beste Prisperiode Innstillinger",
|
|
||||||
"description": "**Konfigurer innstillinger for Beste Prisperiode binærsensor. Denne sensoren er aktiv i perioder med de laveste strømprisene.**{entity_warning}{override_warning}\n\n---",
|
|
||||||
"sections": {
|
"sections": {
|
||||||
"period_settings": {
|
"price_rating_thresholds": {
|
||||||
"name": "Periodeinnstillinger",
|
"name": "Prisvurderings-terskler",
|
||||||
"description": "Konfigurer periodelengde og prisnivåbegrensninger.",
|
"description": "Definer prisvurderingsnivåer.",
|
||||||
"data": {
|
"data": {
|
||||||
"best_price_min_period_length": "Minimum periodelengde",
|
"price_rating_threshold_low": "Lav-terskel",
|
||||||
"best_price_max_level": "Prisnivåfilter",
|
"price_rating_threshold_high": "Høy-terskel"
|
||||||
"best_price_max_level_gap_count": "Gaptoleranse"
|
|
||||||
},
|
},
|
||||||
"data_description": {
|
"data_description": {
|
||||||
"best_price_min_period_length": "Minimum varighet for at en periode skal regnes som 'beste pris'. Lengre perioder er mer praktiske for å kjøre apparater som oppvaskmaskiner eller varmepumper. Beste pris-perioder krever minimum 60 minutter (sammenlignet med 30 minutter for topppris-advarsler) fordi de skal gi meningsfulle tidsvinduer for forbruksplanlegging, ikke bare kortvarige muligheter.",
|
"price_rating_threshold_low": "Prosentverdi for hvor mye gjeldende pris må være under det etterfølgende 24-timers gjennomsnittet for å kvalifisere som 'lav' vurdering. Eksempel: 5 betyr minst 5% under gjennomsnitt. Sensorer med denne vurderingen indikerer gunstige tidsvinduer. Standard: 5%",
|
||||||
"best_price_max_level": "Vis kun beste pris-perioder hvis de inneholder intervaller med prisnivåer ≤ valgt verdi. For eksempel: å velge '**Billig**' betyr at perioden må ha minst étt '**Veldig billig**' eller '**Billig**' intervall. Dette sikrer at 'beste pris'-perioder ikke bare er relativt billige for dagen, men faktisk billige i absolutte tall. Velg '**Alle**' for å vise beste priser uavhengig av deres absolutte prisnivå.",
|
"price_rating_threshold_high": "Prosentverdi for hvor mye gjeldende pris må være over det etterfølgende 24-timers gjennomsnittet for å kvalifisere som 'høy' vurdering. Eksempel: 10 betyr minst 10% over gjennomsnitt. Sensorer med denne vurderingen advarer om dyre tidsvinduer. Standard: 10%"
|
||||||
"best_price_max_level_gap_count": "Maksimalt antall påfølgende intervaller som kan avvike med nøyaktig étt nivåtrinn fra det nødvendige nivået. For eksempel: med '**Billig**' filter og gapantall 1, aksepteres sekvensen '**Billig**, **Billig**, **Normal**, **Billig**' (**Normal** er étt trinn over **Billig**). Dette forhindrer at perioder blir delt opp av tilfeldige nivåavvik. **Merk:** Gaptoleranse krever perioder ≥90 minutter (6 intervaller) for å oppdage avvik effektivt. Standard: 0 (streng filtrering, ingen toleranse)."
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"flexibility_settings": {
|
|
||||||
"name": "Fleksibilitetsinnstillinger",
|
|
||||||
"description": "Konfigurer prissammenligningsgrenser og filtrering.",
|
|
||||||
"data": {
|
|
||||||
"best_price_flex": "Fleksibilitet",
|
|
||||||
"best_price_min_distance_from_avg": "Minimumsavstand"
|
|
||||||
},
|
|
||||||
"data_description": {
|
|
||||||
"best_price_flex": "Maksimalt over den daglige minimumsprisen der intervaller fortsatt kvalifiserer som 'beste pris'. Anbefalt: 15-20 med lemping aktivert (standard), eller 25-35 uten lemping. Maksimum: 50 (hard grense for pålitelig periodegjenkjenning).",
|
|
||||||
"best_price_min_distance_from_avg": "Sikrer at perioder er betydelig billigere enn daglig gjennomsnitt, ikke bare marginalt under det. Dette filtrerer støy og forhindrer at litt-under-gjennomsnittet perioder markeres som 'beste pris' på dager med flate priser. Høyere verdier = strengere filtrering (bare virkelig billige perioder kvalifiserer). Standard: 5 betyr at perioder må være minst 5% under daglig gjennomsnitt."
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"relaxation_and_target_periods": {
|
|
||||||
"name": "Lemping & Målperioder",
|
|
||||||
"description": "Konfigurer automatisk filterlemping og målperiodeantall. Aktiver 'Oppnå minimumsantall' for å aktivere lemping.",
|
|
||||||
"data": {
|
|
||||||
"enable_min_periods_best": "Oppnå minimumsantall",
|
|
||||||
"min_periods_best": "Minimumsperioder",
|
|
||||||
"relaxation_attempts_best": "Lempingsforsøk"
|
|
||||||
},
|
|
||||||
"data_description": {
|
|
||||||
"enable_min_periods_best": "Når aktivert vil filtre gradvis bli lempet hvis ikke nok perioder blir funnet. Dette forsøker å nå det ønskede minimumsantall perioder, som kan inkludere mindre optimale tidsvinduer som beste pris-perioder.",
|
|
||||||
"min_periods_best": "Minimumsantall beste pris-perioder å sikte på per dag. Filtre vil bli lempet steg for steg for å forsøke å oppnå dette antallet. Kun aktiv når 'Oppnå minimumsantall' er aktivert. Standard: 1",
|
|
||||||
"relaxation_attempts_best": "Hvor mange fleksnivåer (forsøk) å prøve før man gir opp. Hvert forsøk kjører alle filterkombinasjoner på det nye fleksnivået. Flere forsøk øker sjansen for å finne flere perioder på bekostning av lengre behandlingstid."
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"submit": "↩ Lagre & tilbake"
|
"submit": "Fortsett →"
|
||||||
|
},
|
||||||
|
"best_price": {
|
||||||
|
"title": "💚 Beste Prisperiode Innstillinger",
|
||||||
|
"description": "_{step_progress}_\n\nKonfigurer innstillinger for **Beste Prisperiode** binærsensor. Denne sensoren er aktiv i perioder med de laveste strømprisene.\n\n---",
|
||||||
|
"sections": {
|
||||||
|
"period_settings": {
|
||||||
|
"name": "Period Settings",
|
||||||
|
"description": "Configure period duration and price level constraints.",
|
||||||
|
"data": {
|
||||||
|
"best_price_min_period_length": "Minimum Period Length",
|
||||||
|
"best_price_max_level": "Price Level Filter",
|
||||||
|
"best_price_max_level_gap_count": "Gap Tolerance"
|
||||||
|
},
|
||||||
|
"data_description": {
|
||||||
|
"best_price_min_period_length": "Minimum duration for a period to be considered as 'best price'. Longer periods are more practical for running appliances like dishwashers or heat pumps. Best price periods require minimum 60 minutes (compared to 30 minutes for peak price alerts) because they should provide meaningful time windows for consumption planning, not just brief opportunities.",
|
||||||
|
"best_price_max_level": "Only show best price periods if they contain intervals with price levels ≤ selected value. For example, selecting 'Cheap' means the period must have at least one 'VERY_CHEAP' or 'CHEAP' interval. This ensures 'best price' periods are not just relatively cheap for the day, but actually cheap in absolute terms. Select 'Any' to show best prices regardless of their absolute price level.",
|
||||||
|
"best_price_max_level_gap_count": "Maximum number of consecutive intervals allowed that deviate by exactly one level step from the required level. For example: with 'Cheap' filter and gap count 1, a sequence 'CHEAP, CHEAP, NORMAL, CHEAP' is accepted (NORMAL is one step above CHEAP). This prevents periods from being split by occasional level deviations. **Note:** Gap tolerance requires periods ≥90 minutes (6 intervals) to detect outliers effectively. Default: 0 (strict filtering, no tolerance)."
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"flexibility_settings": {
|
||||||
|
"name": "Flexibility Settings",
|
||||||
|
"description": "Configure price comparison thresholds and filtering.",
|
||||||
|
"data": {
|
||||||
|
"best_price_flex": "Flexibility",
|
||||||
|
"best_price_min_distance_from_avg": "Minimum Distance"
|
||||||
|
},
|
||||||
|
"data_description": {
|
||||||
|
"best_price_flex": "Maximum above the daily minimum price that intervals can be and still qualify as 'best price'. Recommended: 15-20 with relaxation enabled (default), or 25-35 without relaxation. Maximum: 50 (hard cap for reliable period detection).",
|
||||||
|
"best_price_min_distance_from_avg": "Ensures periods are significantly cheaper than the daily average, not just marginally below it. This filters out noise and prevents marking slightly-below-average periods as 'best price' on days with flat prices. Higher values = stricter filtering (only truly cheap periods qualify). Default: 5 means periods must be at least 5% below the daily average."
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"relaxation_and_target_periods": {
|
||||||
|
"name": "Relaxation & Target Periods",
|
||||||
|
"description": "Configure automatic filter relaxation and target period counts. Enable 'Achieve Minimum Count' to activate relaxation.",
|
||||||
|
"data": {
|
||||||
|
"enable_min_periods_best": "Achieve Minimum Count",
|
||||||
|
"min_periods_best": "Minimum Periods",
|
||||||
|
"relaxation_attempts_best": "Relaxation Attempts"
|
||||||
|
},
|
||||||
|
"data_description": {
|
||||||
|
"enable_min_periods_best": "When enabled, filters will be gradually relaxed if not enough periods are found. This attempts to reach the desired minimum number of periods, which may include less optimal time windows as best-price periods.",
|
||||||
|
"min_periods_best": "Minimum number of best price periods to aim for per day. Filters will be relaxed step-by-step to try achieving this count. Only active when 'Achieve Minimum Count' is enabled. Default: 1",
|
||||||
|
"relaxation_attempts_best": "How many flex levels (attempts) to try before giving up. Each attempt runs all filter combinations at the new flex level. More attempts increase the chance of finding additional periods at the cost of longer processing time."
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"submit": "Fortsett →"
|
||||||
},
|
},
|
||||||
"peak_price": {
|
"peak_price": {
|
||||||
"title": "🔴 Toppprisperiode Innstillinger",
|
"title": "🔴 Toppprisperiode Innstillinger",
|
||||||
"description": "**Konfigurer innstillinger for Toppprisperiode binærsensor. Denne sensoren er aktiv i perioder med de høyeste strømprisene.**{entity_warning}{override_warning}\n\n---",
|
"description": "_{step_progress}_\n\nKonfigurer innstillinger for **Toppprisperiode** binærsensor. Denne sensoren er aktiv i perioder med de høyeste strømprisene.\n\n---",
|
||||||
"sections": {
|
"sections": {
|
||||||
"period_settings": {
|
"period_settings": {
|
||||||
"name": "Periodeinnstillinger",
|
"name": "Periodeinnstillinger",
|
||||||
|
|
@ -264,8 +222,8 @@
|
||||||
},
|
},
|
||||||
"data_description": {
|
"data_description": {
|
||||||
"peak_price_min_period_length": "Minimum varighet for at en periode skal regnes som 'topppris'. Topppris-advarsler er tillatt for kortere perioder (minimum 30 minutter sammenlignet med 60 minutter for beste pris) fordi korte dyre topper er verdt å advare om, selv om de er for korte for forbruksplanlegging.",
|
"peak_price_min_period_length": "Minimum varighet for at en periode skal regnes som 'topppris'. Topppris-advarsler er tillatt for kortere perioder (minimum 30 minutter sammenlignet med 60 minutter for beste pris) fordi korte dyre topper er verdt å advare om, selv om de er for korte for forbruksplanlegging.",
|
||||||
"peak_price_min_level": "Vis kun topprisperioder hvis de inneholder intervaller med prisnivåer ≥ valgt verdi. For eksempel: å velge '**Dyr**' betyr at perioden må ha minst étt '**Dyr**' eller '**Veldig dyr**' intervall. Dette sikrer at 'topppris'-perioder ikke bare er relativt dyre for dagen, men faktisk dyre i absolutte tall. Velg '**Alle**' for å vise topppriser uavhengig av deres absolutte prisnivå.",
|
"peak_price_min_level": "Vis kun topprisperioder hvis de inneholder intervaller med prisnivåer ≥ valgt verdi. For eksempel: å velge 'Dyr' betyr at perioden må ha minst étt 'DYR' eller 'VELDIG_DYR' intervall. Dette sikrer at 'topppris'-perioder ikke bare er relativt dyre for dagen, men faktisk dyre i absolutte tall. Velg 'Alle' for å vise topppriser uavhengig av deres absolutte prisnivå.",
|
||||||
"peak_price_max_level_gap_count": "Maksimalt antall påfølgende intervaller som kan avvike med nøyaktig étt nivåtrinn fra det nødvendige nivået. For eksempel: med '**Dyr**' filter og gapantall 1, aksepteres sekvensen '**Dyr**, **Dyr**, **Normal**, **Dyr**' (**Normal** er étt trinn under **Dyr**). Dette forhindrer at perioder blir delt opp av tilfeldige nivåavvik. **Merk:** Gaptoleranse krever perioder ≥90 minutter (6 intervaller) for å oppdage avvik effektivt. Standard: 0 (streng filtrering, ingen toleranse)."
|
"peak_price_max_level_gap_count": "Maksimalt antall påfølgende intervaller som kan avvike med nøyaktig étt nivåtrinn fra det nødvendige nivået. For eksempel: med 'Dyr' filter og gapantall 1, aksepteres sekvensen 'DYR, DYR, NORMAL, DYR' (NORMAL er étt trinn under DYR). Dette forhindrer at perioder blir delt opp av tilfeldige nivåavvik. **Merk:** Gaptoleranse krever perioder ≥90 minutter (6 intervaller) for å oppdage avvik effektivt. Standard: 0 (streng filtrering, ingen toleranse)."
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"flexibility_settings": {
|
"flexibility_settings": {
|
||||||
|
|
@ -295,63 +253,52 @@
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"submit": "↩ Lagre & tilbake"
|
"submit": "Fortsett →"
|
||||||
},
|
},
|
||||||
"price_trend": {
|
"price_trend": {
|
||||||
"title": "📈 Pristrendterskler",
|
"title": "📈 Pristrendterskler",
|
||||||
"description": "**Konfigurer terskler for pristrendsensorer. Disse sensorene sammenligner nåværende pris med gjennomsnittet av de neste N timene for å bestemme om prisene stiger sterkt, stiger, er stabile, faller eller faller sterkt.**{entity_warning}",
|
"description": "_{step_progress}_\n\n**Konfigurer terskler for pristrendsensorer. Disse sensorene sammenligner nåværende pris med gjennomsnittet av de neste N timene for å bestemme om prisene stiger, faller eller er stabile.**\n\n---",
|
||||||
"data": {
|
"sections": {
|
||||||
"price_trend_threshold_rising": "Stigende terskel",
|
"price_trend_thresholds": {
|
||||||
"price_trend_threshold_strongly_rising": "Sterkt stigende terskel",
|
"name": "Pristrendterskler",
|
||||||
"price_trend_threshold_falling": "Fallende terskel",
|
"description": "Definer pristrendnivåer.",
|
||||||
"price_trend_threshold_strongly_falling": "Sterkt fallende terskel"
|
"data": {
|
||||||
|
"price_trend_threshold_rising": "Stigende terskel",
|
||||||
|
"price_trend_threshold_falling": "Fallende terskel"
|
||||||
|
},
|
||||||
|
"data_description": {
|
||||||
|
"price_trend_threshold_rising": "Prosentverdi for gjennomsnittlig prisøkning per time som kvalifiserer trenden som 'stigende'. Eksempel: 5 betyr minst 5% økning per time. Sensorer med denne trenden indikerer at prisene vil stige raskt. Standard: 5%",
|
||||||
|
"price_trend_threshold_falling": "Prosentverdi for gjennomsnittlig prisnedgang per time som kvalifiserer trenden som 'synkende'. Eksempel: -5 betyr minst 5% nedgang per time. Sensorer med denne trenden indikerer at prisene vil synke raskt. Standard: -5%"
|
||||||
|
}
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"data_description": {
|
"submit": "Fortsett →"
|
||||||
"price_trend_threshold_rising": "Prosentverdi som gjennomsnittet av de neste N timene må være over den nåværende prisen for å kvalifisere som 'stigende' trend. Eksempel: 3 betyr gjennomsnittet er minst 3% høyere → prisene vil stige. Typiske verdier: 3-10%. Standard: 3%",
|
|
||||||
"price_trend_threshold_strongly_rising": "Prosentverdi som gjennomsnittet av de neste N timene må være over den nåværende prisen for å kvalifisere som 'sterkt stigende' trend. Må være høyere enn stigende terskel. Typiske verdier: 6-20%. Standard: 6%",
|
|
||||||
"price_trend_threshold_falling": "Prosentverdi (negativ) som gjennomsnittet av de neste N timene må være under den nåværende prisen for å kvalifisere som 'synkende' trend. Eksempel: -3 betyr gjennomsnittet er minst 3% lavere → prisene vil falle. Typiske verdier: -3 til -10%. Standard: -3%",
|
|
||||||
"price_trend_threshold_strongly_falling": "Prosentverdi (negativ) som gjennomsnittet av de neste N timene må være under den nåværende prisen for å kvalifisere som 'sterkt synkende' trend. Må være lavere (mer negativ) enn fallende terskel. Typiske verdier: -6 til -20%. Standard: -6%"
|
|
||||||
},
|
|
||||||
"submit": "↩ Lagre & tilbake"
|
|
||||||
},
|
},
|
||||||
"volatility": {
|
"volatility": {
|
||||||
"title": "💨 Volatilitets-terskler",
|
"title": "💨 Volatilitets-terskler",
|
||||||
"description": "**Konfigurer terskler for volatilitetsklassifisering.** Volatilitet måler relativ prisvariation ved hjelp av variasjonskoeffisienten (VK = standardavvik / gjennomsnitt × 100%). Disse tersklene er prosentverdier som fungerer på tvers av alle prisnivåer.\n\nBrukes av:\n• Volatilitetssensorer (klassifisering)\n• Trendsensorer (adaptiv terskel justering: <moderat = mer følsom, ≥høy = mindre følsom){entity_warning}",
|
"description": "_{step_progress}_\n\n**Konfigurer terskler for volatilitetsklassifisering. Volatilitet måler relativ prisvariation ved hjelp av variasjonskoeffisienten (VK = standardavvik / gjennomsnitt × 100%). Disse tersklene er prosentverdier som fungerer på tvers av alle prisnivåer.**\n\nBrukes av:\n• Volatilitetssensorer (klassifisering)\n• Trendsensorer (adaptiv terskel justering: <moderat = mer følsom, ≥høy = mindre følsom)\n\n---",
|
||||||
"data": {
|
"sections": {
|
||||||
"volatility_threshold_moderate": "Moderat terskel",
|
"volatility_thresholds": {
|
||||||
"volatility_threshold_high": "Høy terskel",
|
"name": "Volatilitetsterskler",
|
||||||
"volatility_threshold_very_high": "Veldig høy terskel"
|
"description": "Definer volatilitetsklassifiseringsnivåer.",
|
||||||
|
"data": {
|
||||||
|
"volatility_threshold_moderate": "Moderat terskel",
|
||||||
|
"volatility_threshold_high": "Høy terskel",
|
||||||
|
"volatility_threshold_very_high": "Veldig høy terskel"
|
||||||
|
},
|
||||||
|
"data_description": {
|
||||||
|
"volatility_threshold_moderate": "Grenseverdi for standardavvik (% av gjennomsnitt) for å klassifisere prisvariasjonen som 'moderat'. Eksempel: 10 betyr standardavvik ≥ 10% av gjennomsnitt. Dette indikerer økt prisustabilitet. Standard: 10%",
|
||||||
|
"volatility_threshold_high": "Grenseverdi for standardavvik (% av gjennomsnitt) for å klassifisere prisvariasjonen som 'høy'. Eksempel: 20 betyr standardavvik ≥ 20% av gjennomsnitt. Dette indikerer betydelige prissvingninger. Standard: 20%",
|
||||||
|
"volatility_threshold_very_high": "Grenseverdi for standardavvik (% av gjennomsnitt) for å klassifisere prisvariasjonen som 'veldig høy'. Eksempel: 30 betyr standardavvik ≥ 30% av gjennomsnitt. Dette indikerer ekstrem prisustabilitet. Standard: 30%"
|
||||||
|
}
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"data_description": {
|
"submit": "Fortsett →"
|
||||||
"volatility_threshold_moderate": "Variasjonskoeffisient (VK) der prisene anses som 'moderat volatile'. VK = (standardavvik / gjennomsnitt) × 100%. Eksempel: 15 betyr prissvingninger på ±15% rundt gjennomsnittet. Sensorer viser denne klassifiseringen, trendsensorer blir mer følsomme. Standard: 15%",
|
|
||||||
"volatility_threshold_high": "Variasjonskoeffisient (VK) der prisene anses som 'svært volatile'. Eksempel: 30 betyr prissvingninger på ±30% rundt gjennomsnittet. Større prishopp forventes, trendsensorer blir mindre følsomme. Standard: 30%",
|
|
||||||
"volatility_threshold_very_high": "Variasjonskoeffisient (VK) der prisene anses som 'veldig svært volatile'. Eksempel: 50 betyr ekstreme prissvingninger på ±50% rundt gjennomsnittet. På slike dager er sterke pristoppsannsynlige. Standard: 50%"
|
|
||||||
},
|
|
||||||
"submit": "↩ Lagre & tilbake"
|
|
||||||
},
|
},
|
||||||
"chart_data_export": {
|
"chart_data_export": {
|
||||||
"title": "📊 Diagram-dataeksport Sensor",
|
"title": "📊 Diagram-dataeksport Sensor",
|
||||||
"description": "Diagram-dataeksport-sensoren gir prisdata som sensorattributter.\n\n⚠️ **Merk:** Denne sensoren er en legacy-funksjon for kompatibilitet med eldre verktøy.\n\n**Anbefalt for nye oppsett:** Bruk `tibber_prices.get_chartdata` **tjenesten direkte** - den er mer fleksibel, effektiv og den moderne Home Assistant-tilnærmingen.\n\n**Når denne sensoren gir mening:**\n\n✅ Dashboardverktøyet ditt kan **kun** lese attributter (ingen tjenestekall)\n✅ Du trenger statiske data som oppdateres automatisk\n❌ **Ikke for automatiseringer:** Bruk `tibber_prices.get_chartdata` direkte der - mer fleksibel og effektiv!\n\n---\n\n{sensor_status_info}",
|
"description": "_{step_progress}_\n\nDiagram-dataeksport-sensoren gir prisdata som sensorattributter.\n\n⚠️ **Merk:** Denne sensoren er en legacy-funksjon for kompatibilitet med eldre verktøy.\n\n**Anbefalt for nye oppsett:** Bruk `tibber_prices.get_chartdata` **tjenesten direkte** - den er mer fleksibel, effektiv og den moderne Home Assistant-tilnærmingen.\n\n**Når denne sensoren gir mening:**\n\n✅ Dashboardverktøyet ditt kan **kun** lese attributter (ingen tjenestekall)\n✅ Du trenger statiske data som oppdateres automatisk\n❌ **Ikke for automatiseringer:** Bruk `tibber_prices.get_chartdata` direkte der - mer fleksibel og effektiv!\n\n---\n\n**Aktiver sensoren:**\n\n1. Åpne **Innstillinger → Enheter og tjenester → Tibber Prices**\n2. Velg ditt hjem → Finn **'Diagramdataeksport'** (Diagnostikk-seksjonen)\n3. **Aktiver sensoren** (deaktivert som standard)\n\n**Konfigurasjon (valgfritt):**\n\nStandardinnstillinger fungerer umiddelbart (i dag+i morgen, 15-minutters intervaller, bare priser).\n\nFor tilpasning, legg til i **`configuration.yaml`**:\n\n```yaml\ntibber_prices:\n chart_export:\n day:\n - today\n - tomorrow\n include_level: true\n include_rating_level: true\n```\n\n**Alle parametere:** Se `tibber_prices.get_chartdata` tjenestens dokumentasjon",
|
||||||
"submit": "↩ Ok & tilbake"
|
"submit": "Fullfør ✓"
|
||||||
},
|
|
||||||
"reset_to_defaults": {
|
|
||||||
"title": "🔄 Tilbakestill til standard",
|
|
||||||
"description": "⚠️ **Advarsel:** Dette vil tilbakestille **ALLE** innstillinger til fabrikkstandard.\n\n**Hva vil bli tilbakestilt:**\n• Alle prisvurderingsterskler\n• Alle volatilitetsterskler\n• Alle pristrendterskler\n• Alle innstillinger for beste prisperiode\n• Alle innstillinger for toppprisperiode\n• Visningsinnstillinger\n• Generelle innstillinger\n\n**Hva vil IKKE bli tilbakestilt:**\n• Ditt Tibber API-token\n• Valgt hjem\n• Valuta\n\n**💡 Tips:** Dette er nyttig hvis du vil starte på nytt etter å ha eksperimentert med innstillinger.",
|
|
||||||
"data": {
|
|
||||||
"confirm_reset": "Ja, tilbakestill alt til standard"
|
|
||||||
},
|
|
||||||
"submit": "Tilbakestill nå"
|
|
||||||
},
|
|
||||||
"price_level": {
|
|
||||||
"title": "🏷️ Prisnivå-innstillinger",
|
|
||||||
"description": "**Konfigurer stabilisering for Tibbers prisnivå-klassifisering (veldig billig/billig/normal/dyr/veldig dyr).**\n\nTibbers API gir et prisnivå-felt for hvert intervall. Denne innstillingen jevner ut korte svingninger for å forhindre ustabilitet i automatiseringer.{entity_warning}",
|
|
||||||
"data": {
|
|
||||||
"price_level_gap_tolerance": "Gap-toleranse"
|
|
||||||
},
|
|
||||||
"data_description": {
|
|
||||||
"price_level_gap_tolerance": "Maksimalt antall påfølgende intervaller som kan 'jevnes ut' hvis de avviker fra omkringliggende prisnivåer. Små isolerte nivåendringer slås sammen med den dominerende nabogruppen. Eksempel: 1 betyr at et enkelt 'normal'-intervall omgitt av 'billig'-intervaller korrigeres til 'billig'. Sett til 0 for å deaktivere. Standard: 1"
|
|
||||||
},
|
|
||||||
"submit": "↩ Lagre & tilbake"
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"error": {
|
"error": {
|
||||||
|
|
@ -361,10 +308,10 @@
|
||||||
"cannot_connect": "Kunne ikke koble til",
|
"cannot_connect": "Kunne ikke koble til",
|
||||||
"invalid_access_token": "Ugyldig tilgangstoken",
|
"invalid_access_token": "Ugyldig tilgangstoken",
|
||||||
"different_home": "Tilgangstokenet er ikke gyldig for hjem-ID-en denne integrasjonen er konfigurert for.",
|
"different_home": "Tilgangstokenet er ikke gyldig for hjem-ID-en denne integrasjonen er konfigurert for.",
|
||||||
"invalid_flex": "Fleksibilitetsprosent må være mellom -50% og +50%",
|
"invalid_flex": "TRANSLATE: Flexibility percentage must be between -50% and +50%",
|
||||||
"invalid_best_price_distance": "Avstandsprosent må være mellom -50% og 0% (negativ = under gjennomsnitt)",
|
"invalid_best_price_distance": "TRANSLATE: Distance percentage must be between -50% and 0% (negative = below average)",
|
||||||
"invalid_peak_price_distance": "Avstandsprosent må være mellom 0% og 50% (positiv = over gjennomsnitt)",
|
"invalid_peak_price_distance": "TRANSLATE: Distance percentage must be between 0% and 50% (positive = above average)",
|
||||||
"invalid_min_periods": "Minimumsantall perioder må være mellom 1 og 10",
|
"invalid_min_periods": "TRANSLATE: Minimum periods count must be between 1 and 10",
|
||||||
"invalid_period_length": "Periodelengden må være minst 15 minutter (multipler av 15).",
|
"invalid_period_length": "Periodelengden må være minst 15 minutter (multipler av 15).",
|
||||||
"invalid_gap_count": "Gaptoleranse må være mellom 0 og 8",
|
"invalid_gap_count": "Gaptoleranse må være mellom 0 og 8",
|
||||||
"invalid_relaxation_attempts": "Lempingsforsøk må være mellom 1 og 12",
|
"invalid_relaxation_attempts": "Lempingsforsøk må være mellom 1 og 12",
|
||||||
|
|
@ -376,17 +323,10 @@
|
||||||
"invalid_volatility_threshold_very_high": "Svært høy volatilitetsgrense må være mellom 35% og 80%",
|
"invalid_volatility_threshold_very_high": "Svært høy volatilitetsgrense må være mellom 35% og 80%",
|
||||||
"invalid_volatility_thresholds": "Grensene må være i stigende rekkefølge: moderat < høy < svært høy",
|
"invalid_volatility_thresholds": "Grensene må være i stigende rekkefølge: moderat < høy < svært høy",
|
||||||
"invalid_price_trend_rising": "Stigende trendgrense må være mellom 1% og 50%",
|
"invalid_price_trend_rising": "Stigende trendgrense må være mellom 1% og 50%",
|
||||||
"invalid_price_trend_falling": "Fallende trendgrense må være mellom -50% og -1%",
|
"invalid_price_trend_falling": "Fallende trendgrense må være mellom -50% og -1%"
|
||||||
"invalid_price_trend_strongly_rising": "Sterkt stigende trendgrense må være mellom 2% og 100%",
|
|
||||||
"invalid_price_trend_strongly_falling": "Sterkt fallende trendgrense må være mellom -100% og -2%",
|
|
||||||
"invalid_trend_strongly_rising_less_than_rising": "Sterkt stigende-grense må være høyere enn stigende-grense",
|
|
||||||
"invalid_trend_strongly_falling_greater_than_falling": "Sterkt fallende-grense må være lavere (mer negativ) enn fallende-grense"
|
|
||||||
},
|
},
|
||||||
"abort": {
|
"abort": {
|
||||||
"entry_not_found": "Tibber-konfigurasjonsoppføring ikke funnet.",
|
"entry_not_found": "Tibber-konfigurasjonsoppføring ikke funnet."
|
||||||
"reset_cancelled": "Tilbakestilling avbrutt. Ingen endringer ble gjort i konfigurasjonen din.",
|
|
||||||
"reset_successful": "✅ Alle innstillinger har blitt tilbakestilt til fabrikkstandard. Konfigurasjonen din er nå som en ny installasjon.",
|
|
||||||
"finished": "Konfigurasjon fullført."
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"entity": {
|
"entity": {
|
||||||
|
|
@ -394,7 +334,7 @@
|
||||||
"current_interval_price": {
|
"current_interval_price": {
|
||||||
"name": "Nåværende strømpris"
|
"name": "Nåværende strømpris"
|
||||||
},
|
},
|
||||||
"current_interval_price_base": {
|
"current_interval_price_major": {
|
||||||
"name": "Nåværende strømpris (Energi-dashboard)"
|
"name": "Nåværende strømpris (Energi-dashboard)"
|
||||||
},
|
},
|
||||||
"next_interval_price": {
|
"next_interval_price": {
|
||||||
|
|
@ -616,91 +556,73 @@
|
||||||
"price_trend_1h": {
|
"price_trend_1h": {
|
||||||
"name": "Pristrend (1t)",
|
"name": "Pristrend (1t)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Sterkt stigende",
|
|
||||||
"rising": "Stigende",
|
"rising": "Stigende",
|
||||||
"stable": "Stabil",
|
|
||||||
"falling": "Fallende",
|
"falling": "Fallende",
|
||||||
"strongly_falling": "Sterkt fallende"
|
"stable": "Stabil"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_2h": {
|
"price_trend_2h": {
|
||||||
"name": "Pristrend (2t)",
|
"name": "Pristrend (2t)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Sterkt stigende",
|
|
||||||
"rising": "Stigende",
|
"rising": "Stigende",
|
||||||
"stable": "Stabil",
|
|
||||||
"falling": "Fallende",
|
"falling": "Fallende",
|
||||||
"strongly_falling": "Sterkt fallende"
|
"stable": "Stabil"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_3h": {
|
"price_trend_3h": {
|
||||||
"name": "Pristrend (3t)",
|
"name": "Pristrend (3t)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Sterkt stigende",
|
|
||||||
"rising": "Stigende",
|
"rising": "Stigende",
|
||||||
"stable": "Stabil",
|
|
||||||
"falling": "Fallende",
|
"falling": "Fallende",
|
||||||
"strongly_falling": "Sterkt fallende"
|
"stable": "Stabil"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_4h": {
|
"price_trend_4h": {
|
||||||
"name": "Pristrend (4t)",
|
"name": "Pristrend (4t)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Sterkt stigende",
|
|
||||||
"rising": "Stigende",
|
"rising": "Stigende",
|
||||||
"stable": "Stabil",
|
|
||||||
"falling": "Fallende",
|
"falling": "Fallende",
|
||||||
"strongly_falling": "Sterkt fallende"
|
"stable": "Stabil"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_5h": {
|
"price_trend_5h": {
|
||||||
"name": "Pristrend (5t)",
|
"name": "Pristrend (5t)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Sterkt stigende",
|
|
||||||
"rising": "Stigende",
|
"rising": "Stigende",
|
||||||
"stable": "Stabil",
|
|
||||||
"falling": "Fallende",
|
"falling": "Fallende",
|
||||||
"strongly_falling": "Sterkt fallende"
|
"stable": "Stabil"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_6h": {
|
"price_trend_6h": {
|
||||||
"name": "Pristrend (6t)",
|
"name": "Pristrend (6t)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Sterkt stigende",
|
|
||||||
"rising": "Stigende",
|
"rising": "Stigende",
|
||||||
"stable": "Stabil",
|
|
||||||
"falling": "Fallende",
|
"falling": "Fallende",
|
||||||
"strongly_falling": "Sterkt fallende"
|
"stable": "Stabil"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_8h": {
|
"price_trend_8h": {
|
||||||
"name": "Pristrend (8t)",
|
"name": "Pristrend (8t)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Sterkt stigende",
|
|
||||||
"rising": "Stigende",
|
"rising": "Stigende",
|
||||||
"stable": "Stabil",
|
|
||||||
"falling": "Fallende",
|
"falling": "Fallende",
|
||||||
"strongly_falling": "Sterkt fallende"
|
"stable": "Stabil"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"price_trend_12h": {
|
"price_trend_12h": {
|
||||||
"name": "Pristrend (12t)",
|
"name": "Pristrend (12t)",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Sterkt stigende",
|
|
||||||
"rising": "Stigende",
|
"rising": "Stigende",
|
||||||
"stable": "Stabil",
|
|
||||||
"falling": "Fallende",
|
"falling": "Fallende",
|
||||||
"strongly_falling": "Sterkt fallende"
|
"stable": "Stabil"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"current_price_trend": {
|
"current_price_trend": {
|
||||||
"name": "Nåværende pristrend",
|
"name": "Nåværende pristrend",
|
||||||
"state": {
|
"state": {
|
||||||
"strongly_rising": "Sterkt stigende",
|
|
||||||
"rising": "Stigende",
|
"rising": "Stigende",
|
||||||
"stable": "Stabil",
|
|
||||||
"falling": "Fallende",
|
"falling": "Fallende",
|
||||||
"strongly_falling": "Sterkt fallende"
|
"stable": "Stabil"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"next_price_trend_change": {
|
"next_price_trend_change": {
|
||||||
|
|
@ -873,14 +795,6 @@
|
||||||
"ready": "Klar",
|
"ready": "Klar",
|
||||||
"error": "Feil"
|
"error": "Feil"
|
||||||
}
|
}
|
||||||
},
|
|
||||||
"chart_metadata": {
|
|
||||||
"name": "Diagrammetadata",
|
|
||||||
"state": {
|
|
||||||
"pending": "Venter",
|
|
||||||
"ready": "Klar",
|
|
||||||
"error": "Feil"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"binary_sensor": {
|
"binary_sensor": {
|
||||||
|
|
@ -902,52 +816,6 @@
|
||||||
"realtime_consumption_enabled": {
|
"realtime_consumption_enabled": {
|
||||||
"name": "Sanntidsforbruk aktivert"
|
"name": "Sanntidsforbruk aktivert"
|
||||||
}
|
}
|
||||||
},
|
|
||||||
"number": {
|
|
||||||
"best_price_flex_override": {
|
|
||||||
"name": "Beste pris: Fleksibilitet"
|
|
||||||
},
|
|
||||||
"best_price_min_distance_override": {
|
|
||||||
"name": "Beste pris: Minimumsavstand"
|
|
||||||
},
|
|
||||||
"best_price_min_period_length_override": {
|
|
||||||
"name": "Beste pris: Minimum periodelengde"
|
|
||||||
},
|
|
||||||
"best_price_min_periods_override": {
|
|
||||||
"name": "Beste pris: Minimum perioder"
|
|
||||||
},
|
|
||||||
"best_price_relaxation_attempts_override": {
|
|
||||||
"name": "Beste pris: Lemping forsøk"
|
|
||||||
},
|
|
||||||
"best_price_gap_count_override": {
|
|
||||||
"name": "Beste pris: Gaptoleranse"
|
|
||||||
},
|
|
||||||
"peak_price_flex_override": {
|
|
||||||
"name": "Topppris: Fleksibilitet"
|
|
||||||
},
|
|
||||||
"peak_price_min_distance_override": {
|
|
||||||
"name": "Topppris: Minimumsavstand"
|
|
||||||
},
|
|
||||||
"peak_price_min_period_length_override": {
|
|
||||||
"name": "Topppris: Minimum periodelengde"
|
|
||||||
},
|
|
||||||
"peak_price_min_periods_override": {
|
|
||||||
"name": "Topppris: Minimum perioder"
|
|
||||||
},
|
|
||||||
"peak_price_relaxation_attempts_override": {
|
|
||||||
"name": "Topppris: Lemping forsøk"
|
|
||||||
},
|
|
||||||
"peak_price_gap_count_override": {
|
|
||||||
"name": "Topppris: Gaptoleranse"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"switch": {
|
|
||||||
"best_price_enable_relaxation_override": {
|
|
||||||
"name": "Beste pris: Oppnå minimumsantall"
|
|
||||||
},
|
|
||||||
"peak_price_enable_relaxation_override": {
|
|
||||||
"name": "Topppris: Oppnå minimumsantall"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"issues": {
|
"issues": {
|
||||||
|
|
@ -958,18 +826,6 @@
|
||||||
"homes_removed": {
|
"homes_removed": {
|
||||||
"title": "Tibber-hjem fjernet",
|
"title": "Tibber-hjem fjernet",
|
||||||
"description": "Vi oppdaget at {count} hjem har blitt fjernet fra din Tibber-konto: {homes}. Vennligst gjennomgå din Tibber-integrasjonskonfigurasjon."
|
"description": "Vi oppdaget at {count} hjem har blitt fjernet fra din Tibber-konto: {homes}. Vennligst gjennomgå din Tibber-integrasjonskonfigurasjon."
|
||||||
},
|
|
||||||
"tomorrow_data_missing": {
|
|
||||||
"title": "Prisdata for i morgen mangler for {home_name}",
|
|
||||||
"description": "Strømprisdata for i morgen er fortsatt utilgjengelig etter {warning_hour}:00. Dette er uvanlig, da Tibber vanligvis publiserer morgendagens priser på ettermiddagen (rundt 13:00-14:00 CET).\n\nMulige årsaker:\n- Tibber har ikke publisert morgendagens priser ennå\n- Midlertidige API-problemer\n- Strømleverandøren din har ikke sendt inn priser til Tibber\n\nDette problemet vil løse seg automatisk når morgendagens data blir tilgjengelig. Hvis dette vedvarer etter 20:00, vennligst sjekk Tibber-appen eller kontakt Tibber-support."
|
|
||||||
},
|
|
||||||
"rate_limit_exceeded": {
|
|
||||||
"title": "API-hastighetsbegrensning overskredet for {home_name}",
|
|
||||||
"description": "Tibber-APIet har hastighetsbegrenset denne integrasjonen etter {error_count} påfølgende feil. Dette betyr at forespørsler blir gjort for hyppig.\n\nIntegrasjonen vil automatisk prøve på nytt med økende forsinkelser. Dette problemet vil løse seg når hastighetsbegrensningen utløper.\n\nHvis dette vedvarer i flere timer, vurder:\n- Å sjekke om flere Home Assistant-instanser bruker samme API-token\n- Å verifisere at ingen andre applikasjoner bruker Tibber-API-tokenet ditt mye\n- Å redusere oppdateringsfrekvensen hvis du har tilpasset den"
|
|
||||||
},
|
|
||||||
"home_not_found": {
|
|
||||||
"title": "Hjemmet {home_name} ble ikke funnet i Tibber-kontoen",
|
|
||||||
"description": "Hjemmet konfigurert i denne integrasjonen (oppførings-ID: {entry_id}) er ikke lenger tilgjengelig i Tibber-kontoen din. Dette skjer vanligvis når:\n- Hjemmet ble slettet fra Tibber-kontoen din\n- Hjemmet ble flyttet til en annen Tibber-konto\n- Tilgang til dette hjemmet ble tilbakekalt\n\nVennligst fjern denne integrasjonsoppføringen og legg den til på nytt hvis hjemmet fortsatt skal overvåkes. For å fjerne denne oppføringen, gå til Innstillinger → Enheter og tjenester → Tibber Prices og slett {home_name}-konfigurasjonen."
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"services": {
|
"services": {
|
||||||
|
|
@ -1001,23 +857,11 @@
|
||||||
},
|
},
|
||||||
"day": {
|
"day": {
|
||||||
"name": "Dag",
|
"name": "Dag",
|
||||||
"description": "Hvilken dag som skal visualiseres (standard: Rullerende vindu). Faste dagalternativer (I går/I dag/I morgen) viser 24t-spenn uten ekstra avhengigheter. Dynamiske alternativer krever config-template-card: Rullerende vindu lager et fast 48t-vindu som automatisk skifter mellom i går+i dag og i dag+i morgen basert på datatilgjengelighet. Rullerende vindu (Auto-Zoom) oppfører seg likt, men zoomer i tillegg automatisk inn (2t tilbakeblikk + gjenværende tid til midnatt, graph_span reduseres hvert 15. minutt)."
|
"description": "Hvilken dag som skal visualiseres (i går, i dag eller i morgen). Hvis ikke angitt, returneres et rullende 2-dagers vindu: i dag+i morgen (når data for i morgen er tilgjengelig) eller i går+i dag (når data for i morgen ikke er tilgjengelig ennå)."
|
||||||
},
|
},
|
||||||
"level_type": {
|
"level_type": {
|
||||||
"name": "Nivåtype",
|
"name": "Nivåtype",
|
||||||
"description": "Velg hvilken prisnivåklassifisering som skal visualiseres: 'rating_level' (lav/normal/høy basert på dine konfigurerte terskelverdier) eller 'level' (Tibber API-nivåer: veldig billig/billig/normal/dyr/veldig dyr)."
|
"description": "Velg hvilken prisnivåklassifisering som skal visualiseres: 'rating_level' (lav/normal/høy basert på dine konfigurerte terskelverdier) eller 'level' (Tibber API-nivåer: veldig billig/billig/normal/dyr/veldig dyr)."
|
||||||
},
|
|
||||||
"highlight_best_price": {
|
|
||||||
"name": "Fremhev beste prisperioder",
|
|
||||||
"description": "Legg til et halvgjennomsiktig grønt overlegg for å fremheve de beste prisperiodene i diagrammet. Dette gjør det enkelt å visuelt identifisere de optimale tidene for energiforbruk."
|
|
||||||
},
|
|
||||||
"highlight_peak_price": {
|
|
||||||
"name": "Fremhev høyeste prisperioder",
|
|
||||||
"description": "Legg til et halvgjennomsiktig rødt overlegg for å fremheve de høyeste prisperiodene i diagrammet. Dette gjør det enkelt å visuelt identifisere tidene når energi er dyrest."
|
|
||||||
},
|
|
||||||
"resolution": {
|
|
||||||
"name": "Oppløsning",
|
|
||||||
"description": "Tidsoppløsning for diagramdata. 'interval' (standard): Opprinnelige 15-minutters intervaller (96 punkter per dag). 'hourly': Aggregerte timeverdier med et rullende 60-minutters vindu (24 punkter per dag) for et ryddigere og mindre rotete diagram."
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|
@ -1072,36 +916,36 @@
|
||||||
"description": "Utdataformat for de returnerte dataene. Alternativer: 'array_of_objects' (standard, array av objekter med tilpassbare feltnavn), 'array_of_arrays' (array av [tidsstempel, pris]-arrays med avsluttende null-punkt for stepline-diagrammer)."
|
"description": "Utdataformat for de returnerte dataene. Alternativer: 'array_of_objects' (standard, array av objekter med tilpassbare feltnavn), 'array_of_arrays' (array av [tidsstempel, pris]-arrays med avsluttende null-punkt for stepline-diagrammer)."
|
||||||
},
|
},
|
||||||
"array_fields": {
|
"array_fields": {
|
||||||
"name": "Array-felt",
|
"name": "Array-felt (kun Array av arrays)",
|
||||||
"description": "Definer hvilke felt som skal inkluderes. Bruk feltnavn i krøllparenteser, adskilt med komma. Tilgjengelige felt: start_time, price_per_kwh, level, rating_level, average. Felt vil automatisk aktiveres selv om include_*-alternativene ikke er satt. La stå tom for standard (kun tidsstempel og pris)."
|
"description": "[KUN FOR Array av arrays FORMAT] Definer hvilke felt som skal inkluderes. Bruk feltnavn i krøllparenteser, adskilt med komma. Tilgjengelige felt: start_time, price_per_kwh, level, rating_level, average. Felt vil automatisk aktiveres selv om include_*-alternativene ikke er satt. La stå tom for standard (kun tidsstempel og pris)."
|
||||||
},
|
},
|
||||||
"subunit_currency": {
|
"minor_currency": {
|
||||||
"name": "Underenhet valuta",
|
"name": "Mindre valutaenhet",
|
||||||
"description": "Returner priser i underenhet valutaenheter (øre for NOK/SEK, cent for EUR) i stedet for basisvalutaenheter. Deaktivert som standard."
|
"description": "Returner priser i mindre valutaenheter (øre for NOK/SEK, cent for EUR) i stedet for hovedvalutaenheter. Deaktivert som standard."
|
||||||
},
|
},
|
||||||
"round_decimals": {
|
"round_decimals": {
|
||||||
"name": "Rund desimaler",
|
"name": "Rund desimaler",
|
||||||
"description": "Antall desimalplasser å runde priser til (0-10). Hvis ikke angitt, brukes standard presisjon (4 desimaler for basisvaluta, 2 for underenhet valuta)."
|
"description": "Antall desimalplasser å runde priser til (0-10). Hvis ikke angitt, brukes standard presisjon (4 desimaler for hovedvaluta, 2 for mindre valutaenhet)."
|
||||||
},
|
},
|
||||||
"include_level": {
|
"include_level": {
|
||||||
"name": "Inkluder prisnivå",
|
"name": "Inkluder prisnivå (kun Array av objekter)",
|
||||||
"description": "Inkluder Tibber-prisnivåfeltet (veldig billig/billig/normal/dyr/veldig dyr) i hvert datapunkt."
|
"description": "[KUN FOR Array av objekter FORMAT] Inkluder Tibber-prisnivåfeltet (VERY_CHEAP, CHEAP, NORMAL, EXPENSIVE, VERY_EXPENSIVE) i hvert datapunkt."
|
||||||
},
|
},
|
||||||
"include_rating_level": {
|
"include_rating_level": {
|
||||||
"name": "Inkluder prisvurdering",
|
"name": "Inkluder prisvurdering (kun Array av objekter)",
|
||||||
"description": "Inkluder det beregnede prisvurderingsfeltet (lav/normal/høy) basert på dine konfigurerte terskler i hvert datapunkt."
|
"description": "[KUN FOR Array av objekter FORMAT] Inkluder det beregnede prisvurderingsfeltet (LOW, NORMAL, HIGH) basert på dine konfigurerte terskler i hvert datapunkt."
|
||||||
},
|
},
|
||||||
"include_average": {
|
"include_average": {
|
||||||
"name": "Inkluder gjennomsnitt",
|
"name": "Inkluder gjennomsnitt (kun Array av objekter)",
|
||||||
"description": "Inkluder daglig gjennomsnittspris i hvert datapunkt for sammenligning."
|
"description": "[KUN FOR Array av objekter FORMAT] Inkluder daglig gjennomsnittspris i hvert datapunkt for sammenligning."
|
||||||
},
|
},
|
||||||
"level_filter": {
|
"level_filter": {
|
||||||
"name": "Prisnivåfilter",
|
"name": "Prisnivåfilter",
|
||||||
"description": "Filtrer intervaller for å bare inkludere spesifikke Tibber-prisnivåer (veldig billig/billig/normal/dyr/veldig dyr). Hvis ikke angitt, inkluderes alle nivåer."
|
"description": "Filtrer intervaller for å bare inkludere spesifikke Tibber-prisnivåer (VERY_CHEAP, CHEAP, NORMAL, EXPENSIVE, VERY_EXPENSIVE). Hvis ikke angitt, inkluderes alle nivåer."
|
||||||
},
|
},
|
||||||
"rating_level_filter": {
|
"rating_level_filter": {
|
||||||
"name": "Prisvurderingsfilter",
|
"name": "Prisvurderingsfilter",
|
||||||
"description": "Filtrer intervaller for å inkludere bare spesifikke prisvurderinger (lav/normal/høy). Hvis ikke spesifisert, inkluderes alle vurderinger."
|
"description": "Filtrer intervaller for å inkludere bare spesifikke prisvurderinger (LOW, NORMAL, HIGH). Hvis ikke spesifisert, inkluderes alle vurderinger."
|
||||||
},
|
},
|
||||||
"period_filter": {
|
"period_filter": {
|
||||||
"name": "Periodefilter",
|
"name": "Periodefilter",
|
||||||
|
|
@ -1113,43 +957,39 @@
|
||||||
},
|
},
|
||||||
"connect_segments": {
|
"connect_segments": {
|
||||||
"name": "Koble segmenter",
|
"name": "Koble segmenter",
|
||||||
"description": "[KUN MED 'Sett inn NULL-verdier'] Når aktivert, legges tilkoblingspunkter til ved segmentgrenser for å visuelt koble ulike prisnivå-segmenter i trinnlinjediagrammer. Når prisen går NED, legges et punkt med lavere pris til på slutten av gjeldende segment. Når prisen går OPP, legges et holdepunkt til før hullet. Dette skaper jevne visuelle overganger mellom segmenter i stedet for brå hull."
|
"description": "[KUN MED insert_nulls='segments'] Når aktivert, legges tilkoblingspunkter til ved segmentgrenser for å visuelt koble ulike prisnivå-segmenter i trinnlinjediagrammer. Når prisen går NED, legges et punkt med lavere pris til på slutten av gjeldende segment. Når prisen går OPP, legges et holdepunkt til før hullet. Dette skaper jevne visuelle overganger mellom segmenter i stedet for brå hull."
|
||||||
},
|
},
|
||||||
"add_trailing_null": {
|
"add_trailing_null": {
|
||||||
"name": "Legg til avsluttende null-punkt",
|
"name": "Legg til avsluttende null-punkt",
|
||||||
"description": "Legg til et siste datapunkt med nullverdier (unntatt tidsstempel) på slutten. Noen diagrambiblioteker trenger dette for å forhindre ekstrapolering/interpolering til visningsportens kant ved bruk av trinnlinje-rendering. La være deaktivert med mindre diagrammet ditt krever det."
|
"description": "[BEGGE FORMATER] Legg til et siste datapunkt med nullverdier (unntatt tidsstempel) på slutten. Noen diagrambiblioteker trenger dette for å forhindre ekstrapolering/interpolering til visningsportens kant ved bruk av trinnlinje-rendering. La være deaktivert med mindre diagrammet ditt krever det."
|
||||||
},
|
},
|
||||||
"start_time_field": {
|
"start_time_field": {
|
||||||
"name": "Starttid-feltnavn",
|
"name": "Starttid-feltnavn (kun Array of Objects)",
|
||||||
"description": "Egendefinert navn for starttid-feltet i utdata. Standard er 'start_time' hvis ikke angitt."
|
"description": "[KUN FOR Array of Objects FORMAT] Egendefinert navn for starttid-feltet i utdata. Standard er 'start_time' hvis ikke angitt."
|
||||||
},
|
},
|
||||||
"end_time_field": {
|
"end_time_field": {
|
||||||
"name": "Sluttid-feltnavn",
|
"name": "Sluttid-feltnavn (kun Array of Objects)",
|
||||||
"description": "Egendefinert navn for sluttid-feltet i utdata. Standard er 'end_time' hvis ikke angitt. Brukes kun med period_filter."
|
"description": "[KUN FOR Array of Objects FORMAT] Egendefinert navn for sluttid-feltet i utdata. Standard er 'end_time' hvis ikke angitt. Brukes kun med period_filter."
|
||||||
},
|
},
|
||||||
"price_field": {
|
"price_field": {
|
||||||
"name": "Prisfelt-navn",
|
"name": "Prisfelt-navn (kun Array av objekter)",
|
||||||
"description": "Tilpasset navn for prisfeltet i utdata. Standard er 'price_per_kwh'."
|
"description": "[KUN FOR Array av objekter FORMAT] Tilpasset navn for prisfeltet i utdata. Standard er 'price_per_kwh'."
|
||||||
},
|
},
|
||||||
"level_field": {
|
"level_field": {
|
||||||
"name": "Prisnivåfelt-navn",
|
"name": "Prisnivåfelt-navn (kun Array av objekter)",
|
||||||
"description": "Tilpasset navn for prisnivåfeltet i utdata. Standard er 'level'. Brukes bare når include_level er aktivert."
|
"description": "[KUN FOR Array av objekter FORMAT] Tilpasset navn for prisnivåfeltet i utdata. Standard er 'level'. Brukes bare når include_level er aktivert."
|
||||||
},
|
},
|
||||||
"rating_level_field": {
|
"rating_level_field": {
|
||||||
"name": "Prisvurderingsfelt-navn",
|
"name": "Prisvurderingsfelt-navn (kun Array av objekter)",
|
||||||
"description": "Tilpasset navn for prisvurderingsfeltet i utdata. Standard er 'rating_level'. Brukes bare når include_rating_level er aktivert."
|
"description": "[KUN FOR Array av objekter FORMAT] Tilpasset navn for prisvurderingsfeltet i utdata. Standard er 'rating_level'. Brukes bare når include_rating_level er aktivert."
|
||||||
},
|
},
|
||||||
"average_field": {
|
"average_field": {
|
||||||
"name": "Gjennomsnittsfelt-navn",
|
"name": "Gjennomsnittsfelt-navn (kun Array av objekter)",
|
||||||
"description": "Tilpasset navn for gjennomsnittsfeltet i utdata. Standard er 'average'. Brukes bare når include_average er aktivert."
|
"description": "[KUN FOR Array av objekter FORMAT] Tilpasset navn for gjennomsnittsfeltet i utdata. Standard er 'average'. Brukes bare når include_average er aktivert."
|
||||||
},
|
|
||||||
"metadata": {
|
|
||||||
"name": "Metadata",
|
|
||||||
"description": "Kontroller metadata-inkludering i svaret. 'include' (standard): Returnerer både diagramdata og metadata med prisstatistikk, valutainformasjon, Y-akse forslag og tidsperiode. 'only': Returnerer bare metadata uten å behandle diagramdata (raskt, nyttig for dynamisk Y-akse konfigurasjon). 'none': Returnerer bare diagramdata uten metadata."
|
|
||||||
},
|
},
|
||||||
"data_key": {
|
"data_key": {
|
||||||
"name": "Datanøkkel",
|
"name": "Datanøkkel (begge formater)",
|
||||||
"description": "Tilpasset navn for datanøkkelen på toppnivå i svaret. Standard er 'data' hvis ikke angitt."
|
"description": "[BEGGE FORMATER] Tilpasset navn for datanøkkelen på toppnivå i svaret. Standard er 'data' hvis ikke angitt. For ApexCharts-kompatibilitet med Array av arrays, bruk 'points'."
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|
@ -1174,9 +1014,7 @@
|
||||||
"options": {
|
"options": {
|
||||||
"yesterday": "I går",
|
"yesterday": "I går",
|
||||||
"today": "I dag",
|
"today": "I dag",
|
||||||
"tomorrow": "I morgen",
|
"tomorrow": "I morgen"
|
||||||
"rolling_window": "Rullerende vindu",
|
|
||||||
"rolling_window_autozoom": "Rullerende vindu (Auto-Zoom)"
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"resolution": {
|
"resolution": {
|
||||||
|
|
@ -1226,13 +1064,6 @@
|
||||||
"peak_price": "Topp prisperioder"
|
"peak_price": "Topp prisperioder"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"metadata": {
|
|
||||||
"options": {
|
|
||||||
"include": "Inkluder (data + metadata)",
|
|
||||||
"only": "Kun metadata",
|
|
||||||
"none": "Ingen (kun data)"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"volatility": {
|
"volatility": {
|
||||||
"options": {
|
"options": {
|
||||||
"low": "Lav",
|
"low": "Lav",
|
||||||
|
|
@ -1250,18 +1081,6 @@
|
||||||
"expensive": "Dyr",
|
"expensive": "Dyr",
|
||||||
"very_expensive": "Svært dyr"
|
"very_expensive": "Svært dyr"
|
||||||
}
|
}
|
||||||
},
|
|
||||||
"currency_display_mode": {
|
|
||||||
"options": {
|
|
||||||
"base": "Basisvaluta (€, kr)",
|
|
||||||
"subunit": "Underenhet valuta (ct, øre)"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"average_sensor_display": {
|
|
||||||
"options": {
|
|
||||||
"median": "Median",
|
|
||||||
"mean": "Aritmetisk gjennomsnitt"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"title": "Tibber Prisinformasjon & Vurderinger"
|
"title": "Tibber Prisinformasjon & Vurderinger"
|
||||||
|
|
|
||||||
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue