{% from '_helpers.html' import render_field %} {# AI/LLM settings tab content — included from settings.html. Requires template context: form, llm_config, llm_env_configured #}

{# Intro blurb: explains the intent-based change-filtering feature this tab configures. #}
{{ _('Configure an AI/LLM provider to enable intent-based change filtering. Each watch or tag can have a plain-English intent — the AI evaluates every detected change against it and only notifies you when it matches.') }}

{% if llm_env_configured %} {# Config is coming from environment variables — hide the form #}
{# Read-only notice: shows the active LLM_MODEL (and LLM_API_BASE when set) and tells the
   user to unset LLM_MODEL to switch back to form-based configuration. llm_config.get()
   is used with defaults, so missing keys render as empty rather than erroring. #}
{{ _('Note') }} {{ _('AI/LLM is configured via environment variables') }} (LLM_MODEL={{ llm_config.get('model', '') }}{% if llm_config.get('api_base') %}, LLM_API_BASE={{ llm_config.get('api_base') }}{% endif %}). {{ _('Remove the') }} LLM_MODEL {{ _('environment variable to configure via this form instead.') }}
{% else %}
{# Editable form fields (WTForms subform at form.llm.form, rendered via the shared render_field macro). #}
{# Model string — per the help text, encodes provider+model and is routed by litellm; presumably any litellm-supported identifier is accepted (validation not visible here). #}
{{ render_field(form.llm.form.llm_model) }} {{ _('The model string encodes both provider and model — litellm routes automatically. Any') }} {{ _('litellm-supported model') }} {{ _('works here.') }}
{{ render_field(form.llm.form.llm_api_key) }}
{# API base URL — only for Ollama / self-hosted endpoints; blank means the provider's default cloud endpoint. #}
{{ render_field(form.llm.form.llm_api_base) }} {{ _('Only needed for Ollama or custom/self-hosted endpoints. Leave blank for cloud providers.') }}
{# Status indicator: shown only once a model has been saved. Guard checks llm_config
   is truthy first, so a None/missing context value is tolerated. #}
{% if llm_config and llm_config.get('model') %}
✓ {{ _('AI configured:') }} {{ llm_config.get('model') }}
{% endif %}

{# Privacy note: clarifies what data leaves the host (diff + extracted text only) and where the key is stored. #}
{{ _("Your API key is stored locally and sent only to your chosen provider. On each detected change, the watch's diff and extracted text are sent to the LLM — no full page HTML.") }}

{% endif %}{# llm_env_configured #}