reset to new lazy and nvim
@@ -1,3 +1,8 @@
 -- Autocmds are automatically loaded on the VeryLazy event
 -- Default autocmds that are always set: https://github.com/LazyVim/LazyVim/blob/main/lua/lazyvim/config/autocmds.lua
+--
 -- Add any additional autocmds here
+-- with `vim.api.nvim_create_autocmd`
+--
+-- Or remove existing autocmds by their group name (which is prefixed with `lazyvim_` for the defaults)
+-- e.g. vim.api.nvim_del_augroup_by_name("lazyvim_wrap_spell")
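The new header comments point at `vim.api.nvim_create_autocmd`. As a minimal sketch (not part of this commit), an autocmd added in this file might look like:

-- Hypothetical example: briefly highlight yanked text, registered in its
-- own augroup so it could later be removed with nvim_del_augroup_by_name.
vim.api.nvim_create_autocmd("TextYankPost", {
  group = vim.api.nvim_create_augroup("user_yank_highlight", { clear = true }),
  callback = function()
    vim.highlight.on_yank()
  end,
})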
@@ -1,5 +1,3 @@
 -- Keymaps are automatically loaded on the VeryLazy event
 -- Default keymaps that are always set: https://github.com/LazyVim/LazyVim/blob/main/lua/lazyvim/config/keymaps.lua
 -- Add any additional keymaps here
---
-vim.keymap.set("n", "<leader>fq", ":Telescope grep_string<cr>", { desc = "Quick Grep Find" })
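The commit drops the custom `<leader>fq` mapping. For reference, a hypothetical sketch of the same mapping written against the Telescope Lua API (assuming telescope.nvim is installed) rather than the `:Telescope` command string:

-- Hypothetical sketch, not part of this commit: same mapping via the
-- telescope.builtin Lua API instead of the :Telescope Ex command.
vim.keymap.set("n", "<leader>fq", function()
  require("telescope.builtin").grep_string()
end, { desc = "Quick Grep Find" })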
@@ -1,148 +0,0 @@
-return {
-  "yetone/avante.nvim",
-  event = "VeryLazy",
-  lazy = false,
-  version = false, -- set this if you want to always pull the latest change
-  opts = {
-    claude = {
-      ["local"] = true,
-    },
-    -- Begin opts
-    provider = "ol_qwenm",
-    vendors = {
-      ---@type AvanteProvider
-      ol_llama = {
-        ["local"] = true,
-        endpoint = "127.0.0.1:11434/v1",
-        model = "llama3.1:latest",
-        parse_curl_args = function(opts, code_opts)
-          return {
-            url = opts.endpoint .. "/chat/completions",
-            headers = {
-              ["Accept"] = "application/json",
-              ["Content-Type"] = "application/json",
-            },
-            body = {
-              model = opts.model,
-              messages = require("avante.providers").copilot.parse_messages(code_opts), -- you can make your own message, but this is very advanced
-              max_tokens = 2048,
-              stream = true,
-            },
-          }
-        end,
-        parse_response_data = function(data_stream, event_state, opts)
-          require("avante.providers").openai.parse_response(data_stream, event_state, opts)
-        end,
-      },
-      ---@type AvanteProvider
-      ol_qwenl = {
-        ["local"] = true,
-        endpoint = "127.0.0.1:11434/v1",
-        model = "qwen2.5-coder:32b",
-        parse_curl_args = function(opts, code_opts)
-          return {
-            url = opts.endpoint .. "/chat/completions",
-            headers = {
-              ["Accept"] = "application/json",
-              ["Content-Type"] = "application/json",
-            },
-            body = {
-              model = opts.model,
-              messages = require("avante.providers").copilot.parse_messages(code_opts), -- you can make your own message, but this is very advanced
-              max_tokens = 2048,
-              stream = true,
-            },
-          }
-        end,
-        parse_response_data = function(data_stream, event_state, opts)
-          require("avante.providers").openai.parse_response(data_stream, event_state, opts)
-        end,
-      },
-      ---@type AvanteProvider
-      ol_llama70b = {
-        ["local"] = true,
-        endpoint = "127.0.0.1:11434/v1",
-        model = "llama3.1:70b",
-        parse_curl_args = function(opts, code_opts)
-          return {
-            url = opts.endpoint .. "/chat/completions",
-            headers = {
-              ["Accept"] = "application/json",
-              ["Content-Type"] = "application/json",
-            },
-            body = {
-              model = opts.model,
-              messages = require("avante.providers").copilot.parse_messages(code_opts), -- you can make your own message, but this is very advanced
-              max_tokens = 2048,
-              stream = true,
-            },
-          }
-        end,
-        parse_response_data = function(data_stream, event_state, opts)
-          require("avante.providers").openai.parse_response(data_stream, event_state, opts)
-        end,
-      },
-      ---@type AvanteProvider
-      ol_qwenm = {
-        ["local"] = true,
-        endpoint = "127.0.0.1:11434/v1",
-        model = "qwen2.5-coder:14b",
-        parse_curl_args = function(opts, code_opts)
-          return {
-            url = opts.endpoint .. "/chat/completions",
-            headers = {
-              ["Accept"] = "application/json",
-              ["Content-Type"] = "application/json",
-            },
-            body = {
-              model = opts.model,
-              messages = require("avante.providers").copilot.parse_messages(code_opts), -- you can make your own message, but this is very advanced
-              max_tokens = 2048,
-              stream = true,
-            },
-          }
-        end,
-        parse_response_data = function(data_stream, event_state, opts)
-          require("avante.providers").openai.parse_response(data_stream, event_state, opts)
-        end,
-      },
-    },
-    -- add any opts here
-  },
-  -- if you want to build from source then do `make BUILD_FROM_SOURCE=true`
-  build = "make",
-  -- build = "powershell -ExecutionPolicy Bypass -File Build.ps1 -BuildFromSource false" -- for windows
-  dependencies = {
-    "stevearc/dressing.nvim",
-    "nvim-lua/plenary.nvim",
-    "MunifTanjim/nui.nvim",
-    --- The below dependencies are optional,
-    "nvim-tree/nvim-web-devicons", -- or echasnovski/mini.icons
-    --"zbirenbaum/copilot.lua", -- for providers='copilot'
-    {
-      -- support for image pasting
-      "HakonHarnes/img-clip.nvim",
-      event = "VeryLazy",
-      opts = {
-        -- recommended settings
-        default = {
-          embed_image_as_base64 = false,
-          prompt_for_file_name = false,
-          drag_and_drop = {
-            insert_mode = true,
-          },
-          -- required for Windows users
-          use_absolute_path = true,
-        },
-      },
-    },
-    {
-      -- Make sure to set this up properly if you have lazy=true
-      "MeanderingProgrammer/render-markdown.nvim",
-      opts = {
-        file_types = { "markdown", "Avante" },
-      },
-      ft = { "markdown", "Avante" },
-    },
-  },
-}
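The four deleted Ollama vendors are identical except for their `model` field. Had the file stayed, a small factory would have removed the duplication. A minimal sketch, assuming the same avante.nvim provider API used in the deleted file (the helper name ollama_vendor is hypothetical):

-- Hypothetical helper, not part of this commit: build one Ollama vendor
-- entry per model; the four deleted vendors differ only in this argument.
local function ollama_vendor(model)
  return {
    ["local"] = true,
    endpoint = "127.0.0.1:11434/v1",
    model = model,
    parse_curl_args = function(opts, code_opts)
      return {
        url = opts.endpoint .. "/chat/completions",
        headers = {
          ["Accept"] = "application/json",
          ["Content-Type"] = "application/json",
        },
        body = {
          model = opts.model,
          messages = require("avante.providers").copilot.parse_messages(code_opts),
          max_tokens = 2048,
          stream = true,
        },
      }
    end,
    parse_response_data = function(data_stream, event_state, opts)
      require("avante.providers").openai.parse_response(data_stream, event_state, opts)
    end,
  }
end

local vendors = {
  ol_llama = ollama_vendor("llama3.1:latest"),
  ol_qwenl = ollama_vendor("qwen2.5-coder:32b"),
  ol_llama70b = ollama_vendor("llama3.1:70b"),
  ol_qwenm = ollama_vendor("qwen2.5-coder:14b"),
}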
@@ -157,7 +157,11 @@ return {
     "nvim-lualine/lualine.nvim",
     event = "VeryLazy",
     opts = function(_, opts)
-      table.insert(opts.sections.lualine_x, "😄")
+      table.insert(opts.sections.lualine_x, {
+        function()
+          return "😄"
+        end,
+      })
     end,
   },
 
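The lualine change wraps the bare string in a table component whose first element is a function returning the text. lualine accepts both forms, but the table form also takes per-component options. A hypothetical variant of the same opts function, with one such option added (the color value is illustrative):

-- Hypothetical variant, not part of this commit: the same function
-- component, plus a per-component lualine option.
opts = function(_, opts)
  table.insert(opts.sections.lualine_x, {
    function()
      return "😄"
    end,
    color = { fg = "#ff9e64" }, -- any per-component lualine option fits here
  })
end,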
@@ -22,39 +22,4 @@ return {
       labels = "hoeadstnbufgmlzxcv",
     },
   },
-
-  {
-    "hrsh7th/nvim-cmp",
-    init = function()
-      vim.g.cmp_disabled = false
-    end,
-    opts = function(_, opts)
-      opts.enabled = function()
-        -- local context = require("cmp.config.context")
-        if vim.g.cmp_disabled == true then
-          return false
-        end
-        -- some other conditions (like not in comments) can go here
-        return not disabled
-      end
-    end,
-    keys = {
-      {
-        "<leader>ua",
-        function()
-          vim.g.cmp_disabled = not vim.g.cmp_disabled
-          local msg = ""
-          if vim.g.cmp_disabled == true then
-            msg = "Autocompletion (cmp) disabled"
-          else
-            msg = "Autocompletion (cmp) enabled"
-          end
-          vim.notify(msg, vim.log.levels.INFO)
-        end,
-        noremap = true,
-        silent = true,
-        desc = "toggle autocompletion",
-      },
-    },
-  },
 }
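The deleted nvim-cmp toggle is a self-contained pattern worth keeping in mind: `opts.enabled` consults a global flag, and a key flips it. Note that the deleted version ended with `return not disabled`, where `disabled` is an undefined (nil) variable, so it returned true only by accident. A corrected minimal sketch, not part of this commit:

-- Hypothetical corrected sketch: same lazy.nvim plugin spec, with the
-- undefined `disabled` replaced by an explicit `return true`.
{
  "hrsh7th/nvim-cmp",
  init = function()
    vim.g.cmp_disabled = false
  end,
  opts = function(_, opts)
    opts.enabled = function()
      if vim.g.cmp_disabled then
        return false
      end
      -- other conditions (e.g. not inside comments) can go here
      return true
    end
  end,
  keys = {
    {
      "<leader>ua",
      function()
        vim.g.cmp_disabled = not vim.g.cmp_disabled
        local msg = vim.g.cmp_disabled and "Autocompletion (cmp) disabled"
          or "Autocompletion (cmp) enabled"
        vim.notify(msg, vim.log.levels.INFO)
      end,
      desc = "toggle autocompletion",
    },
  },
},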