;;; init-ai.el --- Configuración de inteligencias artificiales -*- lexical-binding: t -*-
;; Author: kj <webmaster@outcontrol.net>
;; URL: https://git.kj2.me/kj/confi-emacs-actual
;;; Commentary:
;; Configuración para inteligencia artificial en Emacs.
;;; Code:
;; LLM client (ollama, chatgpt, gemini, etc.)
(use-package gptel
  :defer nil
  :config
  ;; Default backend: a local Ollama instance.  `gptel-model' is set to
  ;; one of the models registered below.
  (setq gptel-model 'gemma4:e2b
        gptel-backend (gptel-make-ollama "Ollama"
                        :host "localhost:11434"
                        :stream t
                        ;; Disable "thinking" output on models that support it.
                        :request-params '(:think :json-false)
                        :models '("cajina/gemma4_e2b-Q4_k_s:v01"
                                  "su_robin/gemma-4-E4B-it-Q4_K_M:latest"
                                  "dagbs/qwen2.5-coder-0.5b-instruct-abliterated:q4_k_l"
                                  "jewelzufo/Qwen2.5-Coder-0.5B-Instruct-GGUF-Assistant:latest"
                                  "jaahas/qwen3.5-uncensored:4b"
                                  "jaahas/qwen3.5-uncensored:2b"
                                  "qwen3.5:4b"
                                  "qwen3.5:2b"
                                  "gemma4:e2b"
                                  "glm-5.1:cloud"
                                  "gemma4:31b-cloud"
                                  "minimax-m2.5:cloud"
                                  "gpt-oss:120b-cloud"
                                  "gemini-3-flash-preview:cloud"
                                  "glm-5:cloud"
                                  "qwen3-coder-next:cloud"
                                  "qwen3-coder:480b-cloud"
                                  "embeddinggemma:latest")))
  ;; Prefer Gemini over the local backend when an API key is present in
  ;; the environment.
  (when (getenv "GEMINI_KEY")
    (setq gptel-model 'gemini-2.5-flash
          gptel-backend (gptel-make-gemini "Gemini"
                          :key (getenv "GEMINI_KEY")
                          :stream t)))
  (setq gptel-default-mode 'org-mode
        gptel-prompt-prefix-alist
        '((markdown-mode . "# User\n\n")
          (org-mode . "* User\n\n")
          (text-mode . "# User\n\n"))
        gptel-response-prefix-alist
        '((markdown-mode . "# AI\n\n")
          (org-mode . "* AI\n\n")
          (text-mode . "# AI\n\n"))
        gptel-directives
        '((default . "You are a large language model living in Emacs and a helpful assistant. Respond concisely in Spanish.")))
  ;; Keep disabled until resolved: https://github.com/ragnard/gptel-magit/issues/8
  (setopt gptel-include-reasoning nil)
  (defun gptel-switch+model ()
    "Switch to gptel backend and model in a single completion prompt."
    (interactive)
    (let (choices)
      ;; Build an alist of display strings -> (BACKEND-NAME . MODEL)
      ;; covering every model of every registered backend.
      (dolist (pair gptel--known-backends)
        (let* ((backend-name (car pair))
               (backend (cdr pair))
               (models (and (fboundp 'gptel-backend-models)
                            (gptel-backend-models backend))))
          (when models
            (dolist (model models)
              ;; Models may be symbols or strings (the Ollama backend
              ;; above registers strings), so use `format' rather than
              ;; `symbol-name', which errors on strings.
              (push (cons (format "%s → %s"
                                  (propertize backend-name
                                              'face 'font-lock-keyword-face)
                                  (propertize (format "%s" model)
                                              'face 'font-lock-function-name-face))
                          (cons backend-name model))
                    choices)))))
      (let* ((choice (completing-read "Model: " (mapcar #'car choices) nil t))
             (sel (cdr (assoc choice choices))))
        (setq gptel-backend (cdr (assoc (car sel) gptel--known-backends))
              gptel-model (cdr sel))
        (message "gptel set to %s:%s" (car sel) (cdr sel))))))
;; (use-package copilot
;; :hook (prog-mode . copilot-mode)
;; :bind (:map copilot-completion-map
;; ("C-g" . 'copilot-clear-overlay)
;; ("C-<return>" . 'copilot-accept-completion)
;; ("S-<return>" . 'copilot-accept-completion-by-word)))
;; El asistente más completo de todos: Tiene chat, MCP, code completion, etc.
;; (use-package eca
;; ;; :hook (prog-mode . eca-completion-mode)
;; :bind (("M-<return>" . eca-complete)
;; :map eca-completion-map
;; ("C-<return>" . eca-completion-accept)))
;; Generate commit messages with gptel from inside Magit.
(use-package gptel-magit
  :ensure t
  ;; Hook the gptel-magit installer into every Magit buffer.
  :hook (magit-mode . gptel-magit-install))
;; Inline code-completion suggestions backed by gptel.
(use-package gptel-autocomplete
  :ensure (:host github :repo "JDNdeveloper/gptel-autocomplete")
  ;; M-RET requests a completion; C-RET accepts the shown suggestion.
  :bind (("M-<return>" . gptel-complete)
         :map gptel-autocomplete-completion-map
         ("C-<return>" . gptel-accept-completion)))
;; Agent support for gptel.
(use-package gptel-agent
  :ensure t
  ;; Run `gptel-agent-update' once after load; presumably this refreshes
  ;; the agent definitions — confirm against the package's docs.
  :config (gptel-agent-update))
;; LLM-assisted project editing (macher).
(use-package macher
  :ensure (:host github :repo "kmontag/macher")
  :custom
  ;; Render macher action buffers using org-mode.
  (macher-action-buffer-ui 'org))
;; Make this file loadable via (require 'init-ai).
(provide 'init-ai)
;;; init-ai.el ends here