feat(ai): Add Gemini backend with Ollama fallback

Prioritize the Gemini backend if the GEMINI_KEY environment variable is present,
otherwise fall back to the local Ollama instance.
This commit is contained in:
kj
2025-10-02 14:27:45 -03:00
parent 9cce858cd5
commit 844e2965db

View File

@@ -12,17 +12,22 @@
;; Cliente LLM (ollama, chatgpt, gemini, etc.)
(use-package gptel
:config
;; (setq gptel-model 'gemma3:4b
;; gptel-backend (gptel-make-ollama "Ollama"
;; :host "localhost:11434"
;; :stream t
;; :models '("mistral:latest"
;; "deepseek-r1:1.5b"
;; "deepcoder"
;; "dolphin-llama3:latest"
;; "gemma3:4b"
;; "llava:latest"))
gptel-default-mode 'org-mode
(if (getenv "GEMINI_KEY")
(setq gptel-model 'gemini-2.5-flash
gptel-backend (gptel-make-gemini "Gemini"
:key (getenv "GEMINI_KEY")
:stream t))
(setq gptel-model 'gemma3:4b
gptel-backend (gptel-make-ollama "Ollama"
:host "localhost:11434"
:stream t
:models '("mistral:latest"
"deepseek-r1:1.5b"
"deepcoder"
"dolphin-llama3:latest"
"gemma3:4b"
"llava:latest"))))
(setq gptel-default-mode 'org-mode
gptel-prompt-prefix-alist
'((markdown-mode . "# User\n\n")
(org-mode . "* User\n\n")