return { { "yetone/avante.nvim", event = "VeryLazy", lazy = false, opts = { -- Hack to make local LLM work smoothly. Breaks claude entirely. claude = { ["local"] = true, }, provider = "ol_llama", vendors = { ---@type AvanteProvider ol_llama = { ["local"] = true, endpoint = "127.0.0.1:11434/v1", model = "llama3.1:latest", parse_curl_args = function(opts, code_opts) return { url = opts.endpoint .. "/chat/completions", headers = { ["Accept"] = "application/json", ["Content-Type"] = "application/json", }, body = { model = opts.model, messages = require("avante.providers").copilot.parse_message(code_opts), -- you can make your own message, but this is very advanced max_tokens = 2048, stream = true, }, } end, parse_response_data = function(data_stream, event_state, opts) require("avante.providers").openai.parse_response(data_stream, event_state, opts) end, }, ---@type AvanteProvider ol_llama70b = { ["local"] = true, endpoint = "127.0.0.1:11434/v1", model = "llama3.1:70b", parse_curl_args = function(opts, code_opts) return { url = opts.endpoint .. "/chat/completions", headers = { ["Accept"] = "application/json", ["Content-Type"] = "application/json", }, body = { model = opts.model, messages = require("avante.providers").copilot.parse_message(code_opts), -- you can make your own message, but this is very advanced max_tokens = 2048, stream = true, }, } end, parse_response_data = function(data_stream, event_state, opts) require("avante.providers").openai.parse_response(data_stream, event_state, opts) end, }, ---@type AvanteProvider ol_gemma = { ["local"] = true, endpoint = "127.0.0.1:11434/v1", model = "codegemma", parse_curl_args = function(opts, code_opts) return { url = opts.endpoint .. "/chat/completions", headers = { ["Accept"] = "application/json", ["Content-Type"] = "application/json", }, body = { model = opts.model, messages = require("avante.providers").copilot.parse_message(code_opts), -- you can make your own message, but this is very advanced max_tokens = 2048, stream = true, }, } end, parse_response_data = function(data_stream, event_state, opts) require("avante.providers").openai.parse_response(data_stream, event_state, opts) end, }, }, -- add any opts here }, keys = { { "aa", function() require("avante.api").ask() end, desc = "avante: ask", mode = { "n", "v" }, }, { "ar", function() require("avante.api").refresh() end, desc = "avante: refresh", }, { "ae", function() require("avante.api").edit() end, desc = "avante: edit", mode = "v", }, }, dependencies = { "stevearc/dressing.nvim", "nvim-lua/plenary.nvim", "MunifTanjim/nui.nvim", --- The below dependencies are optional, "nvim-tree/nvim-web-devicons", -- or echasnovski/mini.icons -- "echasnovski/mini.icons", { -- support for image pasting "HakonHarnes/img-clip.nvim", event = "VeryLazy", opts = { -- recommended settings default = { embed_image_as_base64 = false, prompt_for_file_name = false, drag_and_drop = { insert_mode = true, }, -- required for Windows users use_absolute_path = true, }, }, }, { -- Make sure to setup it properly if you have lazy=true "MeanderingProgrammer/render-markdown.nvim", opts = { file_types = { "markdown", "Avante" }, }, ft = { "markdown", "Avante" }, }, }, }, }