diff --git a/lua/orgmode/clock/init.lua b/lua/orgmode/clock/init.lua index f6802c689..608352262 100644 --- a/lua/orgmode/clock/init.lua +++ b/lua/orgmode/clock/init.lua @@ -6,25 +6,87 @@ local Input = require('orgmode.ui.input') ---@class OrgClock ---@field files OrgFiles ---@field clocked_headline OrgHeadline|nil +---@field private _clocked_headline_searched boolean Lazy init flag: only search when needed local Clock = {} function Clock:new(opts) local data = { files = opts.files, clocked_headline = nil, + _clocked_headline_searched = false, } setmetatable(data, self) self.__index = self - data:init() + + data:_schedule_async_preload() + return data end --- When first loading, check if there are active clocks -function Clock:init() +---Schedule async preload of clocked headline after files are loaded +function Clock:_schedule_async_preload() + vim.schedule(function() + if self.files.load_state == 'loaded' then + return self:_search_clocked_headline_async() + end + + require('orgmode'):on_files_loaded(function() + vim.defer_fn(function() + self:_search_clocked_headline_async() + end, 1) + end) + end) +end + +-- Async search for clocked headline - non-blocking background search +function Clock:_search_clocked_headline_async() + if self._clocked_headline_searched then + return + end + + -- Don't search if files aren't loaded yet + if self.files.load_state ~= 'loaded' then + return + end + + self._clocked_headline_searched = true + + self.files:get_clocked_headline_async():next(function(headline) + if headline and headline:is_clocked_in() then + self.clocked_headline = headline + end + end) +end + +-- Sync fallback for when immediate access is needed (e.g., clock operations before preload completes) +function Clock:_ensure_clocked_headline_searched() + if self._clocked_headline_searched then + return + end + + -- Don't search if files aren't loaded yet + if self.files.load_state ~= 'loaded' then + return + end + + self._clocked_headline_searched = true + + local emit 
= require('orgmode.utils.emit') + emit.profile('start', 'clock', 'START (sync fallback)') + local last_clocked_headline = self.files:get_clocked_headline() + -- NOTE(review): the old code guarded 'profiler', a local that is never + -- defined in this module; route the mark through the event emitter instead. + emit.profile('mark', 'clock', 'get_clocked_headline() complete') + + if last_clocked_headline and last_clocked_headline:is_clocked_in() then self.clocked_headline = last_clocked_headline end + + -- Close the profiling session opened by the 'start' event above. The + -- previous 'if profiler then ... end' block referenced the same undefined + -- local and silently skipped, leaving the session dangling. + emit.profile('complete', 'clock', 'COMPLETE (sync fallback)') end function Clock:update_clocked_headline() @@ -35,6 +97,8 @@ function Clock:update_clocked_headline() end function Clock:has_clocked_headline() + -- Ensure we've done the initial search + self:_ensure_clocked_headline_searched() self:update_clocked_headline() return self.clocked_headline ~= nil end @@ -114,6 +178,9 @@ function Clock:org_set_effort() end function Clock:get_statusline() + -- Lazy init: search for clocked headline on first statusline call + self:_ensure_clocked_headline_searched() + if not self.clocked_headline or not self.clocked_headline:is_clocked_in() then return '' end diff --git a/lua/orgmode/config/defaults.lua b/lua/orgmode/config/defaults.lua index 2a5ebd64f..cd4ceeb28 100644 --- a/lua/orgmode/config/defaults.lua +++ b/lua/orgmode/config/defaults.lua @@ -2,6 +2,7 @@ local DefaultConfig = { org_agenda_files = '', org_default_notes_file = '', + org_async_loading = false, -- When true, use progressive async loading for files org_todo_keywords = { 'TODO', '|', 'DONE' }, org_todo_repeat_to_state = nil, org_todo_keyword_faces = {}, @@ -240,6 +241,9 @@ local DefaultConfig = { }, }, }, + profiling = { + enabled = false, -- Enable profiling (default: false for zero overhead) + }, } return DefaultConfig diff --git a/lua/orgmode/events/types/init.lua b/lua/orgmode/events/types/init.lua index 73afa80f8..f3a02edde 100644 --- a/lua/orgmode/events/types/init.lua +++ b/lua/orgmode/events/types/init.lua @@ -6,4 +6,5 @@ return { HeadlinePromoted = require('orgmode.events.types.headline_promoted_event'), HeadlineDemoted = 
require('orgmode.events.types.headline_demoted_event'), HeadingToggled = require('orgmode.events.types.heading_toggled'), + Profiling = require('orgmode.events.types.profiling_event'), } diff --git a/lua/orgmode/events/types/profiling_event.lua b/lua/orgmode/events/types/profiling_event.lua new file mode 100644 index 000000000..75166ee8f --- /dev/null +++ b/lua/orgmode/events/types/profiling_event.lua @@ -0,0 +1,50 @@ +---@class OrgProfilingEvent: OrgEvent +---@field type string Always 'orgmode.profiling' +---@field action 'start'|'mark'|'complete' The profiling action +---@field category string Session category (e.g., 'init', 'files', 'plugin:my-plugin') +---@field label string Human-readable label +---@field payload? table Optional additional data +local ProfilingEvent = { + type = 'orgmode.profiling', +} +ProfilingEvent.__index = ProfilingEvent + +---@param action 'start'|'mark'|'complete' +---@param category string +---@param label string +---@param payload? table +---@return OrgProfilingEvent +function ProfilingEvent:new(action, category, label, payload) + local obj = setmetatable({}, self) + obj.action = action + obj.category = category + obj.label = label + obj.payload = payload + return obj +end + +---@param category string +---@param label string +---@param payload? table +---@return OrgProfilingEvent +function ProfilingEvent:start(category, label, payload) + return self:new('start', category, label, payload) +end + +---@param category string +---@param label string +---@param payload? table +---@return OrgProfilingEvent +function ProfilingEvent:mark(category, label, payload) + return self:new('mark', category, label, payload) +end + +---@param category string +---@param label string +---@param payload? 
table +---@return OrgProfilingEvent +function ProfilingEvent:complete(category, label, payload) + return self:new('complete', category, label, payload) +end + +return ProfilingEvent diff --git a/lua/orgmode/files/file.lua b/lua/orgmode/files/file.lua index 3829ce21b..8df9c3010 100644 --- a/lua/orgmode/files/file.lua +++ b/lua/orgmode/files/file.lua @@ -30,6 +30,7 @@ local Buffers = require('orgmode.state.buffers') ---@field metadata OrgFileMetadata ---@field parser vim.treesitter.LanguageTree ---@field root TSNode +---@field _load_timing? { since_last_ms: number, since_start_ms: number } Progressive loading timing data local OrgFile = {} local memoize = Memoize:new(OrgFile, function(self) diff --git a/lua/orgmode/files/init.lua b/lua/orgmode/files/init.lua index 93dad134e..ecb3e6b39 100644 --- a/lua/orgmode/files/init.lua +++ b/lua/orgmode/files/init.lua @@ -5,6 +5,21 @@ local config = require('orgmode.config') local ts_utils = require('orgmode.utils.treesitter') local Listitem = require('orgmode.files.elements.listitem') +---Get filenames of all currently open org buffers +---@return table Set of open org file paths +local function get_open_org_buffers() + local open_bufs = {} + for _, buf in ipairs(vim.api.nvim_list_bufs()) do + if vim.api.nvim_buf_is_loaded(buf) then + local name = vim.api.nvim_buf_get_name(buf) + if name:match('%.org$') then + open_bufs[vim.fn.resolve(name)] = true + end + end + end + return open_bufs +end + ---@class OrgFilesOpts ---@field paths string | string[] ---@field cache? boolean Store the instances to cache and retrieve it later if paths are the same @@ -19,6 +34,29 @@ local Listitem = require('orgmode.files.elements.listitem') ---@field files table table with files that are part of paths ---@field all_files table all loaded files, no matter if they are part of paths ---@field load_state 'loading' | 'loaded' | nil +---@field progressive_state? OrgLoadProgress Progressive loading state +---@field private _load_promise? 
OrgPromise Shared promise for idempotent loading + +---@class OrgLoadProgress +---@field loaded number Number of files loaded so far +---@field total number Total number of files to load +---@field loading boolean Whether loading is in progress +---@field batch_timings? OrgBatchTiming[] Timing data for each batch (set on completion) +---@field total_duration_ms? number Total wall-clock time for loading (set on completion) + +---@class OrgFileScanResult +---@field filename string Absolute path to the org file +---@field mtime_sec number File modification time in seconds (stat.mtime.sec) +---@field mtime_nsec number File modification time nanoseconds (stat.mtime.nsec) +---@field size number File size in bytes + +---@alias OrgLoadProgressiveSortFn fun(a: OrgFileScanResult, b: OrgFileScanResult): boolean + +---@class OrgLoadProgressiveOpts +---@field on_file_loaded? fun(file: OrgFile, index: number, total: number) Callback per file loaded +---@field on_complete? fun(files: OrgFile[]) Callback when all files loaded +---@field filter? 
fun(metadata: OrgFileScanResult): boolean Pre-load filter + local OrgFiles = { cached_instances = {}, } @@ -32,6 +70,7 @@ function OrgFiles:new(opts) all_files = {}, load_state = nil, cache = opts.cache or false, + _load_promise = nil, } setmetatable(data, self) data.paths = self:_setup_paths(opts.paths) @@ -137,6 +176,92 @@ function OrgFiles:get_clocked_headline() return nil end +---@param file OrgFile +---@return OrgHeadline|nil +local function find_clocked_headline_in_file(file) + for _, headline in ipairs(file:get_headlines()) do + if headline:is_clocked_in() then + return headline + end + end + return nil +end + +---@class ClockSearchState +---@field files OrgFile[] +---@field index number +---@field batch_num number +---@field time_budget_ms number +---@field emit table + +---@param state ClockSearchState +local function emit_batch_mark(state, batch_start, elapsed_ms) + state.emit.profile( + 'mark', + 'clock', + string.format('batch %d: files %d-%d (%.1fms)', state.batch_num, batch_start, state.index, elapsed_ms) + ) +end + +---@param state ClockSearchState +---@param resolve fun(headline: OrgHeadline|nil) +local function process_clock_search_batch(state, resolve) + state.batch_num = state.batch_num + 1 + local batch_start = state.index + 1 + local batch_start_time = vim.uv.hrtime() + local total = #state.files + + while state.index < total do + state.index = state.index + 1 + local headline = find_clocked_headline_in_file(state.files[state.index]) + if headline then + state.emit.profile('complete', 'clock', 'FOUND clocked headline', { + file_index = state.index, + batch = state.batch_num, + }) + return resolve(headline) + end + + local elapsed_ms = (vim.uv.hrtime() - batch_start_time) / 1e6 + if elapsed_ms >= state.time_budget_ms then + emit_batch_mark(state, batch_start, elapsed_ms) + return vim.defer_fn(function() + process_clock_search_batch(state, resolve) + end, 1) + end + end + + local elapsed_ms = (vim.uv.hrtime() - batch_start_time) / 1e6 + 
emit_batch_mark(state, batch_start, elapsed_ms) + state.emit.profile('complete', 'clock', 'NO clocked headline found') + resolve(nil) +end + +---Search for clocked headline asynchronously (non-blocking) +---Uses time-boxed batches to ensure UI stays responsive regardless of file sizes +---@return OrgPromise +function OrgFiles:get_clocked_headline_async() + local emit = require('orgmode.utils.emit') + ---@type ClockSearchState + local state = { + files = self:all(), + index = 0, + batch_num = 0, + time_budget_ms = 50, -- ~20fps, imperceptible during normal navigation + emit = emit, + } + + return Promise.new(function(resolve) + emit.profile('start', 'clock', 'get_clocked_headline_async START', { + total = #state.files, + time_budget_ms = state.time_budget_ms, + }) + vim.defer_fn(function() + process_clock_search_batch(state, resolve) + end, 1) + end) +end + function OrgFiles:get_current_file() local filename = utils.current_file_path() local orgfile = self:load_file_sync(filename) @@ -384,4 +509,328 @@ function OrgFiles:_files(skip_resolve) end, all_files) end +---Scan all org files and return metadata without loading/parsing content. +---This is a fast operation that only reads file system metadata. +---Useful for change detection and ordering files by modification time. 
+---@return OrgFileScanResult[] +function OrgFiles:scan() + local metadata = {} + for _, filepath in ipairs(self:_files()) do + local stat = vim.uv.fs_stat(filepath) + if stat and stat.type == 'file' then + table.insert(metadata, { + filename = filepath, + mtime_sec = stat.mtime.sec, + mtime_nsec = stat.mtime.nsec, + size = stat.size, + }) + end + end + return metadata +end + +---Sort metadata according to options +---@private +---@param metadata OrgFileScanResult[] +---@param opts OrgLoadProgressiveOpts +---@return OrgFileScanResult[] +function OrgFiles:_sort_metadata(metadata, opts) + if type(opts.order_by) == 'function' then + ---@cast opts {order_by: OrgLoadProgressiveSortFn} + table.sort(metadata, opts.order_by) + return metadata + end + + local order_by = opts.order_by or 'mtime' + local direction = opts.direction + + ---@param attr string + ---@param desc boolean + ---@return fun(a: OrgFileScanResult, b: OrgFileScanResult): boolean + local function by(attr, desc) + return desc and function(a, b) + return a[attr] > b[attr] + end or function(a, b) + return a[attr] < b[attr] + end + end + + if order_by == 'mtime' then + direction = direction or 'desc' + local desc = direction == 'desc' + table.sort(metadata, function(a, b) + if a.mtime_sec ~= b.mtime_sec then + return by('mtime_sec', desc)(a, b) + end + return by('mtime_nsec', desc)(a, b) + end) + elseif order_by == 'name' then + direction = direction or 'asc' + table.sort(metadata, by('filename', direction == 'desc')) + end + + return metadata +end + +---@class OrgBatchTiming +---@field batch_num number Batch number (1 = open buffers, 2 = remaining files) +---@field files_count number Number of files in this batch +---@field duration_ms number How long the batch took to process +---@field cumulative_ms number Wall-clock time since load started +---@field gap_from_prev_ms number Time gap from previous batch end to this batch start +---@field total_bytes number Total size of files in this batch +---@field 
mem_before_kb number Lua memory before batch (KB) +---@field mem_after_kb number Lua memory after batch (KB) + +---@class OrgLoadQueueState +---@field total number Total number of files to load +---@field loaded_count number Number of files loaded so far +---@field loaded_files OrgFile[] Files that have been loaded +---@field batch_start_time number High-resolution timestamp when loading started +---@field last_callback_time number High-resolution timestamp of last callback +---@field batch_timings OrgBatchTiming[] Timing data for each batch +---@field last_batch_end_time number|nil hrtime when previous batch completed + +---@private +---Handle a single loaded file: update state, track timing, fire callback +---@param orgfile OrgFile|nil The loaded file (nil if loading failed) +---@param index number The index of this file in the load queue +---@param state OrgLoadQueueState Mutable state for the load operation +---@param opts OrgLoadProgressiveOpts Options including callbacks +---@return OrgFile|nil +function OrgFiles:_handle_loaded_file(orgfile, index, state, opts) + if not orgfile then + return orgfile + end + + -- Capture timing to detect if callbacks batch without yielding + local now = vim.uv.hrtime() + orgfile._load_timing = { + since_last_ms = (now - state.last_callback_time) / 1e6, + since_start_ms = (now - state.batch_start_time) / 1e6, + } + state.last_callback_time = now + + -- Update file and state + orgfile.index = index + self.files[orgfile.filename] = orgfile + state.loaded_count = state.loaded_count + 1 + table.insert(state.loaded_files, orgfile) + self.progressive_state.loaded = state.loaded_count + + -- Fire per-file callback + if opts.on_file_loaded then + opts.on_file_loaded(orgfile, state.loaded_count, state.total) + end + + return orgfile +end + +---@private +---Handle completion of all file loading +---@param state OrgLoadQueueState Mutable state for the load operation +---@param opts OrgLoadProgressiveOpts Options including callbacks 
+---@return OrgFiles +function OrgFiles:_handle_all_loaded(state, opts) + self.progressive_state.loading = false + self.progressive_state.batch_timings = state.batch_timings + self.progressive_state.total_duration_ms = (vim.uv.hrtime() - state.batch_start_time) / 1e6 + self.load_state = 'loaded' + + if opts.on_complete then + opts.on_complete(state.loaded_files) + end + + return self +end + +---@private +---Load a batch of files and record timing +---@param files OrgFileScanResult[] Files to load in this batch +---@param batch_num number Batch number for profiling +---@param state OrgLoadQueueState Mutable state for the load operation +---@param opts OrgLoadProgressiveOpts Options including callbacks +---@return OrgPromise +function OrgFiles:_load_batch(files, batch_num, state, opts) + if #files == 0 then + return Promise.resolve() + end + + local mem_before = collectgarbage('count') + local batch_start_hrtime = vim.uv.hrtime() + + -- Calculate gap from previous batch + local gap_ms = 0 + if state.last_batch_end_time then + gap_ms = (batch_start_hrtime - state.last_batch_end_time) / 1e6 + end + + -- Calculate total bytes for this batch + local total_bytes = 0 + for _, m in ipairs(files) do + total_bytes = total_bytes + (m.size or 0) + end + + -- Load all files in this batch in parallel + local promises = {} + for i, m in ipairs(files) do + local file_index = state.loaded_count + i + local promise = self:load_file(m.filename):next(function(orgfile) + return self:_handle_loaded_file(orgfile, file_index, state, opts) + end) + table.insert(promises, promise) + end + + return Promise.all(promises):next(function() + local batch_end_hrtime = vim.uv.hrtime() + local mem_after = collectgarbage('count') + + -- Record batch timing + table.insert(state.batch_timings, { + batch_num = batch_num, + files_count = #files, + duration_ms = (batch_end_hrtime - batch_start_hrtime) / 1e6, + cumulative_ms = (batch_end_hrtime - state.batch_start_time) / 1e6, + gap_from_prev_ms = gap_ms, + 
total_bytes = total_bytes, + mem_before_kb = mem_before, + mem_after_kb = mem_after, + }) + + state.last_batch_end_time = batch_end_hrtime + end) +end + +---Load files progressively: open buffers first, then remaining files. +---@param opts? OrgLoadProgressiveOpts +---@return OrgPromise +function OrgFiles:load_progressive(opts) + opts = opts or {} + local metadata = self:_sort_metadata(self:scan(), opts) -- honor opts.order_by/direction (default: mtime desc); _sort_metadata was previously never called + + -- Apply filter if provided + if opts.filter then + metadata = vim.tbl_filter(opts.filter, metadata) + end + + -- Partition: open org buffers first, then rest + local open_bufs = get_open_org_buffers() + local open_files, rest_files = {}, {} + for _, m in ipairs(metadata) do + if open_bufs[vim.fn.resolve(m.filename)] then + table.insert(open_files, m) + else + table.insert(rest_files, m) + end + end + + -- Initialize state + ---@type OrgLoadQueueState + local state = { + total = #metadata, + loaded_count = 0, + loaded_files = {}, + batch_start_time = vim.uv.hrtime(), + batch_timings = {}, + last_batch_end_time = nil, + } + state.last_callback_time = state.batch_start_time + + self.progressive_state = { + loaded = 0, + total = state.total, + loading = true, + } + + self.load_state = 'loading' + + -- Handle empty file list + if state.total == 0 then + return Promise.resolve(self:_handle_all_loaded(state, opts)) + end + + -- Load in two batches: open buffers first, then rest + return self + :_load_batch(open_files, 1, state, opts) + :next(function() + if #rest_files == 0 then + return + end + -- Yield to event loop between batches. Uses 1ms because defer_fn(fn, 0) + -- can execute immediately without yielding. 1ms ensures Neovim processes + -- pending keyboard/UI events, keeping the editor responsive during loading. 
+ return Promise.new(function(resolve) + vim.defer_fn(function() + self:_load_batch(rest_files, 2, state, opts):next(resolve) + end, 1) + end) + end) + :next(function() + return self:_handle_all_loaded(state, opts) + end) +end + +---Get the current progressive loading state +---@return OrgLoadProgress? +function OrgFiles:get_load_progress() + return self.progressive_state +end + +---@class OrgRequestLoadOpts +---@field async? boolean Use async progressive loading (default: true) +---@field on_file_loaded? fun(file: OrgFile, index: number, total: number) Callback per file loaded +---@field on_complete? fun(files: OrgFile[]) Callback when all files loaded +---@field order_by? 'mtime'|'name'|OrgLoadProgressiveSortFn Sort order for loading files +---@field direction? 'asc'|'desc' Sort direction +---@field batch_size? number Max files per batch (default: all in one batch) + +---Request files to be loaded. Idempotent - multiple calls share the same promise. +---This is the unified entry point for file loading. +---@param opts? 
OrgRequestLoadOpts +---@return OrgPromise +function OrgFiles:request_load(opts) + opts = opts or {} + local async = opts.async ~= false -- default to async + + -- Already loaded - return immediately + if self.load_state == 'loaded' then + if opts.on_complete then + opts.on_complete(self:all()) + end + return Promise.resolve(self) + end + + -- Already loading - chain onto existing promise + if self.load_state == 'loading' and self._load_promise then + return self._load_promise:next(function() + if opts.on_complete then + opts.on_complete(self:all()) + end + return self + end) + end + + -- Start loading - note: load_state is set by load()/load_progressive() + if async then + self._load_promise = self:load_progressive(opts --[[@as OrgLoadProgressiveOpts]]) + else + -- Use force=true to ensure loading happens even if load_state was previously set + self._load_promise = self:load(true):next(function() + if opts.on_complete then + opts.on_complete(self:all()) + end + return self + end) + end + + return self._load_promise +end + +---Synchronous version of request_load for blocking contexts. +---@param timeout? 
number Timeout in milliseconds (default: 20000) +---@return OrgFiles +function OrgFiles:request_load_sync(timeout) + return self:request_load({ async = false }):wait(timeout or 20000) +end + return OrgFiles diff --git a/lua/orgmode/init.lua b/lua/orgmode/init.lua index 0d12e6cf1..d5d550b95 100644 --- a/lua/orgmode/init.lua +++ b/lua/orgmode/init.lua @@ -3,6 +3,8 @@ _G.Org = _G.Org or {} ---@type Org | nil local instance = nil +local emit = require('orgmode.utils.emit') + local auto_instance_keys = { files = true, agenda = true, @@ -27,6 +29,8 @@ local auto_instance_keys = { ---@field org_mappings OrgMappings ---@field notifications OrgNotifications ---@field links OrgLinks +---@field private _file_loaded_callbacks fun(file: OrgFile, index: number, total: number)[] +---@field private _files_loaded_callbacks fun(files: OrgFile[])[] local Org = {} setmetatable(Org, { __index = function(tbl, key) @@ -41,34 +45,132 @@ function Org:new() require('orgmode.org.global')(self) self.initialized = false self.setup_called = false + self._file_loaded_callbacks = {} + self._files_loaded_callbacks = {} self:setup_autocmds() require('orgmode.config'):setup_ts_predicates() return self end +---Show profiling results (delegates to Profiler module) +function Org.profiling() + require('orgmode.utils.profiler').show() +end + function Org:init() if self.initialized then return end + self.buffers = require('orgmode.state.buffers').init() + + -- Timing marks flow through emit.profile events (orgmode.utils.emit); the + -- previous call to the undefined global create_profiler() crashed init here. + + emit.profile('start', 'init', 'START') + + local config = require('orgmode.config') + emit.profile('mark', 'init', 'require config') + require('orgmode.events').init() + emit.profile('mark', 'init', 'events.init()') + self.highlighter = require('orgmode.colors.highlighter'):new() + emit.profile('mark', 'init', 'highlighter:new()') + require('orgmode.colors.highlights').define_highlights() - self.files = require('orgmode.files') - :new({ - paths = 
require('orgmode.config').org_agenda_files, - }) - :load_sync(true, 20000) + emit.profile('mark', 'init', 'define_highlights()') + + self.files = require('orgmode.files'):new({ + -- reuse 'config' local from above (a 'local' statement inside a table constructor is a syntax error) + paths = config.org_agenda_files, + }) + emit.profile('mark', 'init', 'OrgFiles:new()') + + emit.profile('mark', 'init', string.format('org_async_loading = %s', tostring(config.org_async_loading))) + + -- Callback wrappers to fire registered callbacks + local function on_file_loaded(file, index, total) + for _, callback in ipairs(self._file_loaded_callbacks) do + callback(file, index, total) + end + end + + local function on_complete(files) + -- Emit batch-level profiling events + local progress = self.files:get_load_progress() + if progress and progress.batch_timings then + emit.profile('start', 'files', string.format('START (%d files)', progress.total), { total = progress.total }) + + for _, batch in ipairs(progress.batch_timings) do + local mem_delta = batch.mem_after_kb and batch.mem_before_kb and (batch.mem_after_kb - batch.mem_before_kb) + or nil + local batch_name = batch.batch_num == 1 and 'open buffers' or 'remaining' + emit.profile( + 'mark', + 'files', + string.format('batch %d (%s): %d files', batch.batch_num, batch_name, batch.files_count), + { + total_ms = batch.cumulative_ms, + duration_ms = batch.duration_ms, + yield_gap_ms = batch.gap_from_prev_ms, + total_bytes = batch.total_bytes, + mem_delta_kb = mem_delta, + } + ) + end + + -- Calculate final wall time from the last batch + local last_batch = progress.batch_timings[#progress.batch_timings] + local final_wall_ms = last_batch and last_batch.cumulative_ms or 0 + + emit.profile('complete', 'files', 'COMPLETE', { + total = progress.total, + total_ms = final_wall_ms, + duration_ms = 0, -- COMPLETE is just a marker, no additional duration + }) + end + + -- Fire registered callbacks + for _, callback in ipairs(self._files_loaded_callbacks) do + callback(files) + end + end + + if 
config.org_async_loading then + -- Defer to next event loop to allow buffer display first + vim.schedule(function() + self.files:request_load({ + async = true, + on_file_loaded = on_file_loaded, + on_complete = on_complete, + }) + end) + emit.profile('mark', 'init', 'scheduled request_load (async)') + else + self.files:request_load_sync() + emit.profile('mark', 'init', 'request_load_sync COMPLETE') + on_complete(self.files:all()) + end + self.links = require('orgmode.org.links'):new({ files = self.files }) + emit.profile('mark', 'init', 'links:new()') + self.agenda = require('orgmode.agenda'):new({ files = self.files, highlighter = self.highlighter, links = self.links, }) + emit.profile('mark', 'init', 'agenda:new()') + self.capture = require('orgmode.capture'):new({ files = self.files, }) + emit.profile('mark', 'init', 'capture:new()') + self.completion = require('orgmode.org.autocompletion'):new({ files = self.files, links = self.links }) + emit.profile('mark', 'init', 'completion:new()') + self.org_mappings = require('orgmode.org.mappings'):new({ capture = self.capture, agenda = self.agenda, @@ -76,13 +178,20 @@ function Org:init() links = self.links, completion = self.completion, }) + emit.profile('mark', 'init', 'org_mappings:new()') + self.clock = require('orgmode.clock'):new({ files = self.files, }) + emit.profile('mark', 'init', 'clock:new()') + self.statusline_debounced = require('orgmode.utils').debounce('statusline', function() return self.clock:get_statusline() end, 300) + emit.profile('mark', 'init', 'statusline setup') + self.initialized = true + emit.profile('complete', 'init', 'COMPLETE') end ---@param file? 
string @@ -113,7 +222,14 @@ function Org:setup_autocmds() pattern = 'org', group = org_augroup, callback = function() - self:reload(vim.fn.expand('<afile>:p')) + -- Defer to let buffer display first, then initialize + local file = vim.fn.expand('<afile>:p') + vim.schedule(function() + emit.profile('start', 'filetype', 'START') + self:reload(file):next(function() + emit.profile('complete', 'filetype', 'reload() complete') + end) + end) end, }) vim.api.nvim_create_autocmd('ColorScheme', { @@ -153,20 +269,34 @@ function Org.setup(opts) opts = opts or {} local config = require('orgmode.config'):extend(opts) config:install_grammar() + + -- Initialize profiler based on config + require('orgmode.utils.profiler').setup({ + enabled = config.profiling and config.profiling.enabled or false, + }) + instance = Org:new() instance.setup_called = true - vim.defer_fn(function() - if config.notifications.enabled and #vim.api.nvim_list_uis() > 0 then - Org.files:load():next(vim.schedule_wrap(function() + + -- Register notifications via callback - no separate load() call! + -- Notifications will start after files are loaded by Org:init() + if config.notifications.enabled and #vim.api.nvim_list_uis() > 0 then + instance:on_files_loaded(function() + -- Defer to let the UI settle after file loading completes + vim.defer_fn(function() + instance.notifications = require('orgmode.notifications') + :new({ + files = Org.files, + }) + :start_timer() - end)) - end + end, 1000) + end) + end + + vim.defer_fn(function() config:setup_mappings('global') end, 1) + return instance end @@ -254,6 +384,85 @@ function Org.destroy() end end +--- Scan all org files and return metadata without full parsing. +--- This is a fast operation that only reads file system metadata (mtime, size). +---@return OrgFileScanResult[] +function Org:scan_files() + self:init() + return self.files:scan() +end + +--- Load files progressively with callbacks for incremental updates. +--- Files are sorted by mtime (newest first) by default. 
+---@param opts? OrgLoadProgressiveOpts +---@return OrgPromise +function Org:load_files(opts) + self:init() + opts = opts or {} + + -- Wrap callbacks to also fire registered event callbacks + local original_on_file_loaded = opts.on_file_loaded + local original_on_complete = opts.on_complete + + opts.on_file_loaded = function(file, index, total) + -- Fire registered callbacks + for _, callback in ipairs(self._file_loaded_callbacks) do + callback(file, index, total) + end + -- Fire original callback if provided + if original_on_file_loaded then + original_on_file_loaded(file, index, total) + end + end + + opts.on_complete = function(files) + -- Fire registered callbacks + for _, callback in ipairs(self._files_loaded_callbacks) do + callback(files) + end + -- Fire original callback if provided + if original_on_complete then + original_on_complete(files) + end + end + + return self.files:load_progressive(opts) +end + +--- Check if files have been loaded. +---@return boolean +function Org:is_files_loaded() + if not self.initialized then + return false + end + local progress = self.files:get_load_progress() + if not progress then + return false + end + return not progress.loading and progress.loaded == progress.total +end + +--- Get current file loading progress. +---@return { loaded: number, total: number, loading: boolean }? +function Org:get_files_progress() + if not self.initialized then + return nil + end + return self.files:get_load_progress() +end + +--- Register a callback to be called when each file is loaded. +---@param callback fun(file: OrgFile, index: number, total: number) +function Org:on_file_loaded(callback) + table.insert(self._file_loaded_callbacks, callback) +end + +--- Register a callback to be called when all files are loaded. 
+---@param callback fun(files: OrgFile[]) +function Org:on_files_loaded(callback) + table.insert(self._files_loaded_callbacks, callback) +end + function Org.is_setup_called() if not instance then return false diff --git a/lua/orgmode/notifications/init.lua b/lua/orgmode/notifications/init.lua index 273284b1f..f421daaf6 100644 --- a/lua/orgmode/notifications/init.lua +++ b/lua/orgmode/notifications/init.lua @@ -24,7 +24,8 @@ end function Notifications:start_timer() self:stop_timer() self.timer = vim.uv.new_timer() - self:notify(Date.now()) + -- No immediate notify() - timer fires at next minute boundary + -- Files are already loaded when start_timer() is called (via on_files_loaded callback) self.timer:start( (60 - os.date('%S')) * 1000, 60000, diff --git a/lua/orgmode/org/global.lua b/lua/orgmode/org/global.lua index f72330529..ea0907093 100644 --- a/lua/orgmode/org/global.lua +++ b/lua/orgmode/org/global.lua @@ -98,6 +98,9 @@ local build = function(orgmode) indent_mode = function() require('orgmode.ui.virtual_indent').toggle_buffer_indent_mode() end, + profiling = function() + require('orgmode').profiling() + end, } _G.Org = OrgGlobal diff --git a/lua/orgmode/utils/emit.lua b/lua/orgmode/utils/emit.lua new file mode 100644 index 000000000..a245dacc8 --- /dev/null +++ b/lua/orgmode/utils/emit.lua @@ -0,0 +1,15 @@ +local EventManager = require('orgmode.events') +local ProfilingEvent = require('orgmode.events.types.profiling_event') + +local M = {} + +-- Events dispatch regardless of profiler state; profiler decides whether to process +---@param action 'start'|'mark'|'complete' +---@param category string +---@param label string +---@param payload? 
table +function M.profile(action, category, label, payload) + EventManager.dispatch(ProfilingEvent[action](ProfilingEvent, category, label, payload)) +end + +return M diff --git a/lua/orgmode/utils/profiler.lua b/lua/orgmode/utils/profiler.lua new file mode 100644 index 000000000..8e87f0b70 --- /dev/null +++ b/lua/orgmode/utils/profiler.lua @@ -0,0 +1,470 @@ +---@class OrgProfiler +---@field private _enabled boolean +---@field private _sessions table +---@field private _section_config table +---@field private _event_subscriptions table +local Profiler = { + _enabled = false, + _sessions = {}, + _section_config = {}, + _event_subscriptions = {}, +} + +---@class OrgProfilerSession +---@field category string Session category (e.g., 'init', 'files', 'plugin:my-plugin') +---@field start_time number High-resolution start timestamp (vim.uv.hrtime) +---@field entries OrgProfilerEntry[] Collected timing entries + +---@class OrgProfilerEntry +---@field label string Human-readable label +---@field timestamp number High-resolution timestamp +---@field total_ms number Milliseconds since session start +---@field delta_ms number Milliseconds since previous entry +---@field payload? table Optional event payload data + +---@class OrgProfilerSectionConfig +---@field title string Display title +---@field order number Sort order (lower = earlier) +---@field format 'simple'|'batch' Display format +---@field description? 
string Hover/help text + +-- Pre-registered core sections +local DEFAULT_SECTIONS = { + init = { + title = 'Org:init()', + order = 1, + format = 'simple', + description = 'Core initialization timing', + }, + files = { + title = 'Progressive Loading', + order = 2, + format = 'batch', + description = 'File loading with batch timing', + }, + clock = { + title = 'Clock Lazy Init', + order = 3, + format = 'simple', + description = 'Clock lazy initialization', + }, + filetype = { + title = 'FileType Reload', + order = 4, + format = 'simple', + description = 'FileType autocmd reload timing', + }, +} + +---@param opts? { enabled?: boolean } +function Profiler.setup(opts) + opts = opts or {} + Profiler._enabled = opts.enabled or false + Profiler._sessions = {} + Profiler._section_config = vim.deepcopy(DEFAULT_SECTIONS) + Profiler._event_subscriptions = {} + + if Profiler._enabled then + Profiler._subscribe_to_events() + end +end + +---@return boolean +function Profiler.is_enabled() + return Profiler._enabled +end + +---@return table +function Profiler.get_data() + return Profiler._sessions +end + +function Profiler.clear() + Profiler._sessions = {} +end + +---Register a custom profiling section +---@param category string Unique category (recommend 'plugin:') +---@param opts OrgProfilerSectionConfig +---@return boolean success +function Profiler.register_section(category, opts) + if Profiler._section_config[category] then + return false -- already registered + end + Profiler._section_config[category] = { + title = opts.title or category, + order = opts.order or 1000, -- plugins sort after core + format = opts.format or 'simple', + description = opts.description, + } + return true +end + +---@class OrgProfilerHandle +---@field mark fun(label: string, payload?: table) Record a timing mark +---@field finish fun() Complete the session and store data +---@field cancel fun() Cancel the session without storing + +---Create a new profiling session for custom code +---@param category 
string Unique category name (recommend: 'plugin:') +---@return OrgProfilerHandle +function Profiler.create_session(category) + if not Profiler._enabled then + -- Return no-op handle when profiling disabled + return { + mark = function() end, + finish = function() end, + cancel = function() end, + } + end + + Profiler._ensure_section(category) + + local start_time = vim.uv.hrtime() + local entries = {} + local last_time = start_time + local cancelled = false + + return { + mark = function(label, payload) + if cancelled then + return + end + local now = vim.uv.hrtime() + table.insert(entries, { + label = label, + timestamp = now, + total_ms = (now - start_time) / 1e6, + delta_ms = (now - last_time) / 1e6, + payload = payload, + }) + last_time = now + end, + finish = function() + if cancelled then + return + end + Profiler._sessions[category] = { + category = category, + start_time = start_time, + entries = entries, + } + end, + cancel = function() + cancelled = true + end, + } +end + +---@private +---@param category string +function Profiler._ensure_section(category) + if not Profiler._section_config[category] then + -- Auto-create for unregistered categories + Profiler._section_config[category] = { + title = category:gsub('^plugin:', 'Plugin: '):gsub('^%l', string.upper), + order = 9999, -- sort last + format = 'simple', + } + end +end + +---Subscribe to profiling events (called during setup if enabled) +---@private +function Profiler._subscribe_to_events() + local EventManager = require('orgmode.events') + local ProfilingEvent = require('orgmode.events.types.profiling_event') + EventManager.listen(ProfilingEvent, Profiler._on_profiling_event) +end + +---Handle a profiling event +---@private +---@param event OrgProfilingEvent +function Profiler._on_profiling_event(event) + if not Profiler._enabled then + return + end + + local category = event.category + Profiler._ensure_section(category) + + if event.action == 'start' then + -- Initialize a new session + 
Profiler._sessions[category] = { + category = category, + start_time = vim.uv.hrtime(), + entries = {}, + } + -- Add the start entry + local session = Profiler._sessions[category] + table.insert(session.entries, { + label = event.label, + timestamp = session.start_time, + total_ms = 0, + delta_ms = 0, + payload = event.payload, + }) + elseif event.action == 'mark' or event.action == 'complete' then + local session = Profiler._sessions[category] + if not session then + -- Session wasn't started, create it now + session = { + category = category, + start_time = vim.uv.hrtime(), + entries = {}, + } + Profiler._sessions[category] = session + end + + local now = vim.uv.hrtime() + local last_entry = session.entries[#session.entries] + local last_time = last_entry and last_entry.timestamp or session.start_time + + table.insert(session.entries, { + label = event.label, + timestamp = now, + total_ms = (now - session.start_time) / 1e6, + delta_ms = (now - last_time) / 1e6, + payload = event.payload, + }) + end +end + +---Format milliseconds for display +---@private +---@param ms number +---@return string +local function format_ms(ms) + if ms >= 1000 then + return string.format('%7.2f s', ms / 1000) + else + return string.format('%7.2f ms', ms) + end +end + +---Add a simple section to output lines +---@private +---@param lines string[] +---@param title string +---@param entries OrgProfilerEntry[] +local function add_simple_section(lines, title, entries) + if #entries == 0 then + return + end + + table.insert(lines, string.format('## %s', title)) + table.insert(lines, '') + table.insert(lines, string.format(' %-40s %10s %10s', 'Step', 'Total', 'Delta')) + table.insert(lines, string.format(' %s %s %s', string.rep('-', 40), string.rep('-', 10), string.rep('-', 10))) + + for _, entry in ipairs(entries) do + local payload_str = '' + if entry.payload then + local parts = {} + for k, v in pairs(entry.payload) do + table.insert(parts, string.format('%s=%s', k, tostring(v))) + end + 
if #parts > 0 then + payload_str = ' ' .. table.concat(parts, ', ') + end + end + table.insert( + lines, + string.format(' %-40s %s %s%s', entry.label, format_ms(entry.total_ms), format_ms(entry.delta_ms), payload_str) + ) + end + table.insert(lines, '') +end + +---Format memory delta for display with consistent width +---@private +---@param mem_delta_kb number +---@return string +local function format_mem(mem_delta_kb) + return string.format('%+7.1f MB', mem_delta_kb / 1024) +end + +---Add a batch section to output lines (for file loading with memory/yield info) +---@private +---@param lines string[] +---@param title string +---@param entries OrgProfilerEntry[] +local function add_batch_section(lines, title, entries) + if #entries == 0 then + return + end + + table.insert(lines, string.format('## %s', title)) + table.insert(lines, '') + + -- Check if any entry has batch-specific payload + local has_batch_data = false + for _, entry in ipairs(entries) do + if entry.payload and (entry.payload.mem_delta_kb or entry.payload.yield_gap_ms) then + has_batch_data = true + break + end + end + + if not has_batch_data then + -- Simple format without batch data + table.insert(lines, string.format(' %-40s %10s %10s', 'Step', 'Total', 'Delta')) + table.insert(lines, string.format(' %s %s %s', string.rep('-', 40), string.rep('-', 10), string.rep('-', 10))) + for _, entry in ipairs(entries) do + table.insert( + lines, + string.format(' %-40s %s %s', entry.label, format_ms(entry.total_ms), format_ms(entry.delta_ms)) + ) + end + table.insert(lines, '') + return + end + + -- Batch format: first pass to collect data and calculate column widths + local headers = { 'Step', 'Since Start', 'Batch Time', 'Idle', 'Mem Δ' } + local rows = {} + local async_scheduling_row = nil + + for _, entry in ipairs(entries) do + local payload = entry.payload or {} + local idle = payload.yield_gap_ms + local mem_delta = payload.mem_delta_kb + local since_start = payload.total_ms or entry.total_ms + local 
batch_time = payload.duration_ms or entry.delta_ms + + -- Detect async scheduling delay before first batch + if not async_scheduling_row and batch_time and since_start and batch_time > 0 then + local scheduling_delay = since_start - batch_time - (idle or 0) + if scheduling_delay > 1 then + async_scheduling_row = { + '(async scheduling)', + format_ms(scheduling_delay), + '', + '', + '', + } + end + end + + -- Only show values if meaningful (> 0) + local show_batch_time = batch_time and batch_time > 0 + local show_idle = idle and idle > 0 + local show_mem = mem_delta and mem_delta ~= 0 + + table.insert(rows, { + entry.label, + format_ms(since_start), + show_batch_time and format_ms(batch_time) or '', + show_idle and format_ms(idle) or '', + show_mem and format_mem(mem_delta) or '', + }) + end + + -- Insert async scheduling row after first entry (START) if present + if async_scheduling_row and #rows > 0 then + table.insert(rows, 2, async_scheduling_row) + end + + -- Calculate column widths: max of header and all content + local col_widths = {} + for i, header in ipairs(headers) do + col_widths[i] = #header + end + for _, row in ipairs(rows) do + for i, cell in ipairs(row) do + col_widths[i] = math.max(col_widths[i], #cell) + end + end + + -- Build format string with calculated widths + local header_fmt = ' %-' .. col_widths[1] .. 's' + local row_fmt = ' %-' .. col_widths[1] .. 's' + local dash_parts = { string.rep('-', col_widths[1]) } + for i = 2, #col_widths do + header_fmt = header_fmt .. ' %' .. col_widths[i] .. 's' + row_fmt = row_fmt .. ' %' .. col_widths[i] .. 's' + table.insert(dash_parts, string.rep('-', col_widths[i])) + end + + -- Output header + table.insert(lines, string.format(header_fmt, unpack(headers))) + table.insert(lines, ' ' .. 
table.concat(dash_parts, ' ')) + + -- Output rows + for _, row in ipairs(rows) do + table.insert(lines, string.format(row_fmt, unpack(row))) + end + table.insert(lines, '') +end + +---Display profiling results in floating window +function Profiler.show() + local lines = { '# Orgmode Profiling Results', '' } + + -- Get sorted section categories + local categories = {} + for category, _ in pairs(Profiler._sessions) do + table.insert(categories, category) + end + table.sort(categories, function(a, b) + local order_a = Profiler._section_config[a] and Profiler._section_config[a].order or 9999 + local order_b = Profiler._section_config[b] and Profiler._section_config[b].order or 9999 + if order_a ~= order_b then + return order_a < order_b + end + return a < b + end) + + local has_data = false + for _, category in ipairs(categories) do + local session = Profiler._sessions[category] + if session and #session.entries > 0 then + has_data = true + local config = Profiler._section_config[category] or { title = category, format = 'simple' } + + if config.format == 'batch' then + add_batch_section(lines, config.title, session.entries) + else + add_simple_section(lines, config.title, session.entries) + end + end + end + + if not has_data then + table.insert(lines, 'No profiling data available yet.') + table.insert(lines, 'Open an org file first to collect timing data.') + end + + -- Create floating window + local buf = vim.api.nvim_create_buf(false, true) + vim.api.nvim_buf_set_lines(buf, 0, -1, false, lines) + vim.api.nvim_set_option_value('modifiable', false, { buf = buf }) + vim.api.nvim_set_option_value('filetype', 'markdown', { buf = buf }) + + local width = math.min(120, vim.o.columns - 4) + local height = math.min(#lines + 2, vim.o.lines - 4) + local row = math.floor((vim.o.lines - height) / 2) + local col = math.floor((vim.o.columns - width) / 2) + + local win = vim.api.nvim_open_win(buf, true, { + relative = 'editor', + width = width, + height = height, + row = row, + 
col = col, + style = 'minimal', + border = 'rounded', + title = ' Orgmode Profiling ', + title_pos = 'center', + }) + + -- Close on q or <Esc> + vim.keymap.set('n', 'q', function() + vim.api.nvim_win_close(win, true) + end, { buffer = buf }) + vim.keymap.set('n', '<Esc>', function() + vim.api.nvim_win_close(win, true) + end, { buffer = buf }) +end + +return Profiler diff --git a/tests/plenary/files/orgfiles_spec.lua b/tests/plenary/files/orgfiles_spec.lua new file mode 100644 index 000000000..6490cb42c --- /dev/null +++ b/tests/plenary/files/orgfiles_spec.lua @@ -0,0 +1,453 @@ +local OrgFiles = require('orgmode.files') + +describe('OrgFiles', function() + local temp_dir + local test_files = {} + + before_each(function() + -- Use resolve() to get canonical path (handles macOS /var -> /private/var symlink) + temp_dir = vim.fn.resolve(vim.fn.tempname()) + vim.fn.mkdir(temp_dir, 'p') + test_files = {} + end) + + after_each(function() + for _, file in ipairs(test_files) do + vim.fn.delete(file) + end + vim.fn.delete(temp_dir, 'rf') + end) + + ---@param name string + ---@param content string[] + ---@return string filepath + local function create_test_file(name, content) + local filepath = temp_dir .. '/' .. name + vim.fn.writefile(content, filepath) + table.insert(test_files, filepath) + return filepath + end + + describe('scan', function() + it('should return metadata for all org files', function() + local file1 = create_test_file('test1.org', { '* Headline 1' }) + local file2 = create_test_file('test2.org', { '* Headline 2', '** Sub headline' }) + + local files = OrgFiles:new({ paths = { temp_dir .. 
'/*.org' } }) + local metadata = files:scan() + + assert.are.equal(2, #metadata) + + -- Verify structure of returned metadata + for _, entry in ipairs(metadata) do + assert.is_not_nil(entry.filename) + assert.is_not_nil(entry.mtime_sec) + assert.is_not_nil(entry.mtime_nsec) + assert.is_not_nil(entry.size) + assert.is_true(entry.size > 0) + end + + -- Verify filenames are correct + local filenames = vim.tbl_map(function(m) + return m.filename + end, metadata) + table.sort(filenames) + assert.is_true(vim.tbl_contains(filenames, file1)) + assert.is_true(vim.tbl_contains(filenames, file2)) + end) + + it('should return empty table for empty paths', function() + local empty_dir = vim.fn.tempname() + vim.fn.mkdir(empty_dir, 'p') + + local files = OrgFiles:new({ paths = { empty_dir .. '/*.org' } }) + local metadata = files:scan() + + assert.are.same({}, metadata) + vim.fn.delete(empty_dir, 'rf') + end) + + it('should filter out non-org files', function() + create_test_file('test.org', { '* Headline' }) + create_test_file('readme.txt', { 'Not an org file' }) + create_test_file('notes.md', { '# Markdown' }) + + local files = OrgFiles:new({ paths = { temp_dir .. '/*' } }) + local metadata = files:scan() + + assert.are.equal(1, #metadata) + assert.is_true(metadata[1].filename:match('%.org$') ~= nil) + end) + + it('should return correct mtime matching file state', function() + local filepath = create_test_file('timed.org', { '* Test' }) + + local files = OrgFiles:new({ paths = { temp_dir .. 
'/*.org' } }) + local metadata = files:scan() + + assert.are.equal(1, #metadata) + + local stat = vim.uv.fs_stat(filepath) + assert.are.equal(stat.mtime.sec, metadata[1].mtime_sec) + assert.are.equal(stat.mtime.nsec, metadata[1].mtime_nsec) + assert.are.equal(stat.size, metadata[1].size) + end) + + it('should handle missing/deleted files gracefully', function() + local file1 = create_test_file('exists.org', { '* Exists' }) + local file2 = create_test_file('will_delete.org', { '* Will be deleted' }) + + local files = OrgFiles:new({ paths = { temp_dir .. '/*.org' } }) + + -- Delete file before scanning + vim.fn.delete(file2) + + local metadata = files:scan() + + assert.are.equal(1, #metadata) + assert.are.equal(file1, metadata[1].filename) + end) + + it('should be fast (benchmark with multiple files)', function() + -- Create 20 files for benchmark + for i = 1, 20 do + create_test_file(string.format('file%02d.org', i), { '* Headline ' .. i }) + end + + local files = OrgFiles:new({ paths = { temp_dir .. '/*.org' } }) + + local start = vim.uv.hrtime() + local metadata = files:scan() + local elapsed_ms = (vim.uv.hrtime() - start) / 1e6 + + assert.are.equal(20, #metadata) + -- Should complete in under 100ms for 20 files (very conservative) + assert.is_true(elapsed_ms < 100, 'scan() took ' .. elapsed_ms .. 
'ms, expected < 100ms') + end) + end) + + describe('_sort_metadata', function() + it('should sort by mtime descending by default', function() + local files = OrgFiles:new({ paths = {} }) + local metadata = { + { filename = 'old.org', mtime_sec = 100, mtime_nsec = 0 }, + { filename = 'new.org', mtime_sec = 200, mtime_nsec = 0 }, + } + + files:_sort_metadata(metadata, {}) + + assert.are.equal('new.org', metadata[1].filename) + assert.are.equal('old.org', metadata[2].filename) + end) + + it('should sort by mtime ascending when specified', function() + local files = OrgFiles:new({ paths = {} }) + local metadata = { + { filename = 'new.org', mtime_sec = 200, mtime_nsec = 0 }, + { filename = 'old.org', mtime_sec = 100, mtime_nsec = 0 }, + } + + files:_sort_metadata(metadata, { order_by = 'mtime', direction = 'asc' }) + + assert.are.equal('old.org', metadata[1].filename) + assert.are.equal('new.org', metadata[2].filename) + end) + + it('should use mtime_nsec as tiebreaker', function() + local files = OrgFiles:new({ paths = {} }) + local metadata = { + { filename = 'first.org', mtime_sec = 100, mtime_nsec = 500 }, + { filename = 'second.org', mtime_sec = 100, mtime_nsec = 1000 }, + } + + files:_sort_metadata(metadata, { order_by = 'mtime', direction = 'desc' }) + + assert.are.equal('second.org', metadata[1].filename) + assert.are.equal('first.org', metadata[2].filename) + end) + + it('should sort by name ascending by default', function() + local files = OrgFiles:new({ paths = {} }) + local metadata = { + { filename = 'zebra.org' }, + { filename = 'alpha.org' }, + { filename = 'middle.org' }, + } + + files:_sort_metadata(metadata, { order_by = 'name' }) + + assert.are.equal('alpha.org', metadata[1].filename) + assert.are.equal('middle.org', metadata[2].filename) + assert.are.equal('zebra.org', metadata[3].filename) + end) + + it('should sort by name descending when specified', function() + local files = OrgFiles:new({ paths = {} }) + local metadata = { + { filename = 
'alpha.org' }, + { filename = 'zebra.org' }, + } + + files:_sort_metadata(metadata, { order_by = 'name', direction = 'desc' }) + + assert.are.equal('zebra.org', metadata[1].filename) + assert.are.equal('alpha.org', metadata[2].filename) + end) + + it('should use custom sort function', function() + local files = OrgFiles:new({ paths = {} }) + local metadata = { + { filename = 'small.org', size = 10 }, + { filename = 'large.org', size = 100 }, + } + + files:_sort_metadata(metadata, { + order_by = function(a, b) + return a.size > b.size + end, + }) + + assert.are.equal('large.org', metadata[1].filename) + assert.are.equal('small.org', metadata[2].filename) + end) + end) + + describe('load_progressive', function() + it('should load all files and call on_complete callback', function() + create_test_file('test1.org', { '* Headline 1' }) + create_test_file('test2.org', { '* Headline 2' }) + create_test_file('test3.org', { '* Headline 3' }) + + local files = OrgFiles:new({ paths = { temp_dir .. '/*.org' } }) + local completed = false + local loaded_files = {} + + files + :load_progressive({ + current_buffer_first = false, + on_complete = function(all_files) + completed = true + loaded_files = all_files + end, + }) + :wait() + + assert.is_true(completed) + assert.are.equal(3, #loaded_files) + assert.are.equal(3, #files:all()) + end) + + it('should call on_file_loaded callback for each file', function() + create_test_file('test1.org', { '* Headline 1' }) + create_test_file('test2.org', { '* Headline 2' }) + + local files = OrgFiles:new({ paths = { temp_dir .. 
'/*.org' } }) + local callback_calls = {} + + files + :load_progressive({ + current_buffer_first = false, + on_file_loaded = function(file, index, total) + table.insert(callback_calls, { + filename = file.filename, + index = index, + total = total, + }) + end, + }) + :wait() + + assert.are.equal(2, #callback_calls) + for _, call in ipairs(callback_calls) do + assert.are.equal(2, call.total) + assert.is_true(call.index >= 1 and call.index <= 2) + end + end) + + it('should apply filter before loading', function() + create_test_file('include.org', { '* Include me' }) + create_test_file('exclude.org', { '* Exclude me' }) + + local files = OrgFiles:new({ paths = { temp_dir .. '/*.org' } }) + local loaded_files = {} + + files + :load_progressive({ + current_buffer_first = false, + filter = function(metadata) + return metadata.filename:match('include') ~= nil + end, + on_complete = function(all_files) + loaded_files = all_files + end, + }) + :wait() + + assert.are.equal(1, #loaded_files) + assert.is_true(loaded_files[1].filename:match('include') ~= nil) + end) + + it('should track progress state correctly', function() + create_test_file('test1.org', { '* Headline 1' }) + create_test_file('test2.org', { '* Headline 2' }) + + local files = OrgFiles:new({ paths = { temp_dir .. '/*.org' } }) + + -- Initially no progress state + assert.is_nil(files:get_load_progress()) + + files + :load_progressive({ + current_buffer_first = false, + }) + :wait() + + local progress = files:get_load_progress() + assert.is_not_nil(progress) + assert.are.equal(2, progress.loaded) + assert.are.equal(2, progress.total) + assert.is_false(progress.loading) + end) + + it('should handle empty file list gracefully', function() + local empty_dir = vim.fn.tempname() + vim.fn.mkdir(empty_dir, 'p') + + local files = OrgFiles:new({ paths = { empty_dir .. 
'/*.org' } }) + local completed = false + + files + :load_progressive({ + on_complete = function() + completed = true + end, + }) + :wait() + + assert.is_true(completed) + local progress = files:get_load_progress() + assert.are.equal(0, progress.total) + assert.are.equal(0, progress.loaded) + assert.is_false(progress.loading) + + vim.fn.delete(empty_dir, 'rf') + end) + end) + + describe('request_load', function() + it('should be idempotent - multiple calls load files only once', function() + create_test_file('test1.org', { '* Headline 1' }) + create_test_file('test2.org', { '* Headline 2' }) + + local files = OrgFiles:new({ paths = { temp_dir .. '/*.org' } }) + local load_count = 0 + + -- Call request_load twice before either completes + local promise1 = files:request_load({ + on_complete = function() + load_count = load_count + 1 + end, + }) + local promise2 = files:request_load({ + on_complete = function() + load_count = load_count + 1 + end, + }) + + -- Wait for both + promise1:wait() + promise2:wait() + + -- Files should be loaded exactly once, both callbacks called + assert.are.equal('loaded', files.load_state) + assert.are.equal(2, #files:all()) + assert.are.equal(2, load_count) -- Both callbacks called + end) + + it('should chain onto existing promise when already loading', function() + create_test_file('test1.org', { '* Headline 1' }) + + local files = OrgFiles:new({ paths = { temp_dir .. 
'/*.org' } }) + local callback_order = {} + + -- Start first load + local promise1 = files:request_load({ + on_complete = function() + table.insert(callback_order, 'first') + end, + }) + + -- Chain second load while first is in progress + local promise2 = files:request_load({ + on_complete = function() + table.insert(callback_order, 'second') + end, + }) + + -- Wait for both + promise1:wait() + promise2:wait() + + -- Both callbacks should have been called + assert.are.equal(2, #callback_order) + assert.is_true(vim.tbl_contains(callback_order, 'first')) + assert.is_true(vim.tbl_contains(callback_order, 'second')) + end) + + it('should return resolved promise when already loaded', function() + create_test_file('test1.org', { '* Headline 1' }) + + local files = OrgFiles:new({ paths = { temp_dir .. '/*.org' } }) + + -- First load + files:request_load():wait() + assert.are.equal('loaded', files.load_state) + + -- Second call should return immediately + local callback_called = false + local promise = files:request_load({ + on_complete = function() + callback_called = true + end, + }) + + -- Should resolve immediately (already loaded) + promise:wait() + assert.is_true(callback_called) + end) + end) + + describe('request_load_sync', function() + it('should block until files are loaded', function() + create_test_file('test1.org', { '* Headline 1' }) + create_test_file('test2.org', { '* Headline 2' }) + + local files = OrgFiles:new({ paths = { temp_dir .. '/*.org' } }) + + -- Should block and return loaded files + local result = files:request_load_sync(5000) + + assert.are.equal('loaded', files.load_state) + assert.are.equal(files, result) + assert.are.equal(2, #files:all()) + end) + + it('should work when already loaded', function() + create_test_file('test1.org', { '* Headline 1' }) + + local files = OrgFiles:new({ paths = { temp_dir .. 
'/*.org' } }) + + -- First load + files:request_load_sync(5000) + + -- Second call should return immediately + local start = vim.uv.hrtime() + local result = files:request_load_sync(5000) + local elapsed_ms = (vim.uv.hrtime() - start) / 1e6 + + assert.are.equal(files, result) + -- Should be nearly instant (< 10ms) since already loaded + assert.is_true(elapsed_ms < 10, 'request_load_sync took ' .. elapsed_ms .. 'ms when already loaded') + end) + end) +end) diff --git a/tests/plenary/init_spec.lua b/tests/plenary/init_spec.lua index 47d52eb36..066e1bbb4 100644 --- a/tests/plenary/init_spec.lua +++ b/tests/plenary/init_spec.lua @@ -82,4 +82,79 @@ describe('Init', function() vim.cmd('norm >>') assert.are.same({ '** TODO Test' }, vim.api.nvim_buf_get_lines(0, 0, -1, false)) end) + + describe('Public API', function() + it('scan_files should return metadata for all org files', function() + local results = org:scan_files() + assert.is.table(results) + assert.is_true(#results > 0) + -- Check result structure + local first = results[1] + assert.is.string(first.filename) + assert.is.number(first.mtime_sec) + assert.is.number(first.mtime_nsec) + assert.is.number(first.size) + end) + + it('on_file_loaded should register callback', function() + org:on_file_loaded(function() end) + assert.is_true(#org._file_loaded_callbacks > 0) + end) + + it('on_files_loaded should register callback', function() + org:on_files_loaded(function() end) + assert.is_true(#org._files_loaded_callbacks > 0) + end) + + it('registered callbacks should fire during load_files', function() + local file_cb_count = 0 + local files_cb_called = false + org:on_file_loaded(function() + file_cb_count = file_cb_count + 1 + end) + org:on_files_loaded(function() + files_cb_called = true + end) + org:load_files():wait() + assert.is_true(file_cb_count > 0) + assert.is_true(files_cb_called) + end) + + it('load_files should load progressively and fire callbacks', function() + local file_loaded_count = 0 + local 
complete_called = false + local loaded_files = nil + + org + :load_files({ + on_file_loaded = function(file, index, total) + file_loaded_count = file_loaded_count + 1 + end, + on_complete = function(files) + complete_called = true + loaded_files = files + end, + }) + :wait() + + assert.is_true(file_loaded_count > 0) + assert.is_true(complete_called) + assert.is.table(loaded_files) + assert.are.same(file_loaded_count, #loaded_files) + end) + + it('is_files_loaded and get_files_progress should work after load_files', function() + org:load_files():wait() + + assert.is_true(org:is_files_loaded()) + + local progress = org:get_files_progress() + assert.is.table(progress) + assert.is.number(progress.loaded) + assert.is.number(progress.total) + assert.is.boolean(progress.loading) + assert.are.same(progress.loaded, progress.total) + assert.is_false(progress.loading) + end) + end) end) diff --git a/tests/plenary/ui/clock_spec.lua b/tests/plenary/ui/clock_spec.lua index 29a13515d..43bfa8ca7 100644 --- a/tests/plenary/ui/clock_spec.lua +++ b/tests/plenary/ui/clock_spec.lua @@ -164,6 +164,7 @@ describe('Clock', function() -- Establish baseline: Test 1 is clocked in local clock = orgmode.clock + assert.is_true(clock:has_clocked_headline()) -- Triggers lazy init assert.are.same('Test 1', clock.clocked_headline:get_title()) assert.is_true(clock.clocked_headline:is_clocked_in()) @@ -211,4 +212,99 @@ describe('Clock', function() '* TODO Test 3', }, vim.api.nvim_buf_get_lines(0, 0, -1, false)) end) + + describe('async preload', function() + it('should preload clocked headline after files are loaded', function() + local file = helpers.create_agenda_file({ + '* TODO Test headline', + ' :LOGBOOK:', + ' CLOCK: [2024-05-22 Wed 05:15]', + ' :END:', + }) + vim.cmd('edit ' .. 
file.filename) + + local clock = orgmode.clock + -- Async preload runs in background - wait for it to complete + vim.wait(1000, function() + return clock.clocked_headline ~= nil + end, 10) + + assert.is_true(clock._clocked_headline_searched) + assert.is_not_nil(clock.clocked_headline) + + -- Multiple calls are idempotent + local result1 = clock:has_clocked_headline() + local result2 = clock:has_clocked_headline() + assert.is_true(clock._clocked_headline_searched) + assert.is_true(result1) + assert.is_true(result2) + end) + + it('should return consistent results after async preload', function() + local file = helpers.create_agenda_file({ + '* TODO Clocked task', + ' :LOGBOOK:', + ' CLOCK: [2024-05-22 Wed 05:15]', + ' :END:', + }) + vim.cmd('edit ' .. file.filename) + + local clock = orgmode.clock + -- Trigger lazy init via has_clocked_headline + local first_result = clock:has_clocked_headline() + local first_headline = clock.clocked_headline + + -- Call again - should get same result + local second_result = clock:has_clocked_headline() + local second_headline = clock.clocked_headline + + assert.are.equal(first_result, second_result) + assert.are.same(first_headline:get_title(), second_headline:get_title()) + end) + + it('should not search before files are loaded', function() + local Clock = require('orgmode.clock') + -- Create a mock files object that isn't loaded + local mock_files = { + load_state = 'loading', -- Not 'loaded' + get_clocked_headline = function() + error('Should not be called when files not loaded') + end, + } + + local clock = Clock:new({ files = mock_files }) + assert.is_false(clock._clocked_headline_searched) + + -- Call _ensure_clocked_headline_searched directly + clock:_ensure_clocked_headline_searched() + + -- Should still be false - didn't search because files not loaded + assert.is_false(clock._clocked_headline_searched) + assert.is_nil(clock.clocked_headline) + end) + + it('should search when files are loaded', function() + local Clock = 
require('orgmode.clock') + local search_called = false + -- Create a mock files object that is loaded + local mock_files = { + load_state = 'loaded', + get_clocked_headline = function() + search_called = true + return nil + end, + } + + local clock = Clock:new({ files = mock_files }) + assert.is_false(clock._clocked_headline_searched) + assert.is_false(search_called) + + -- Call _ensure_clocked_headline_searched directly + clock:_ensure_clocked_headline_searched() + + -- Should be true now - searched because files are loaded + assert.is_true(clock._clocked_headline_searched) + assert.is_true(search_called) + end) + end) end) diff --git a/tests/plenary/utils/profiler_spec.lua b/tests/plenary/utils/profiler_spec.lua new file mode 100644 index 000000000..9b883e2f2 --- /dev/null +++ b/tests/plenary/utils/profiler_spec.lua @@ -0,0 +1,266 @@ +local Profiler = require('orgmode.utils.profiler') +local emit = require('orgmode.utils.emit') + +-- Use unique category prefix to avoid conflicts with real orgmode events +local function cat(name) + return 'test:profiler:' .. 
name +end + +describe('Profiler', function() + before_each(function() + Profiler.clear() + Profiler.setup({ enabled = true }) + end) + + after_each(function() + Profiler.clear() + end) + + describe('payload passthrough', function() + it('preserves simple payload data', function() + local c = cat('simple') + emit.profile('start', c, 'started', { key = 'value' }) + + local data = Profiler.get_data() + assert.is_not_nil(data[c]) + assert.equals(1, #data[c].entries) + assert.is_not_nil(data[c].entries[1].payload) + assert.equals('value', data[c].entries[1].payload.key) + end) + + it('preserves numeric payload values', function() + local c = cat('numeric') + emit.profile('start', c, 'started') + emit.profile('mark', c, 'batch complete', { + batch_num = 1, + total_ms = 29.66, + files_count = 12, + }) + + local data = Profiler.get_data() + local entry = data[c].entries[2] + assert.equals(1, entry.payload.batch_num) + assert.equals(29.66, entry.payload.total_ms) + assert.equals(12, entry.payload.files_count) + end) + + it('preserves nested payload structures', function() + local c = cat('nested') + emit.profile('start', c, 'started', { + metadata = { + source = 'test', + details = { a = 1, b = 2 }, + }, + }) + + local data = Profiler.get_data() + local payload = data[c].entries[1].payload + assert.equals('test', payload.metadata.source) + assert.equals(1, payload.metadata.details.a) + assert.equals(2, payload.metadata.details.b) + end) + + it('preserves batch timing payload (real use case)', function() + -- This mirrors the actual batch payload from OrgFiles + local c = cat('batch') + emit.profile('start', c, 'loading', { total_files = 605 }) + emit.profile('mark', c, 'batch 1', { + batch_num = 1, + start_idx = 1, + end_idx = 12, + files_count = 12, + total_ms = 23.1, + mem_before_kb = 50000, + mem_after_kb = 51200, + mem_delta_kb = 1200, + }) + emit.profile('complete', c, 'done', { total_ms = 156.8 }) + + local data = Profiler.get_data() + assert.equals(3, 
#data[c].entries) + + -- Start entry + assert.equals(605, data[c].entries[1].payload.total_files) + + -- Batch entry + local batch = data[c].entries[2].payload + assert.equals(1, batch.batch_num) + assert.equals(1, batch.start_idx) + assert.equals(12, batch.end_idx) + assert.equals(12, batch.files_count) + assert.equals(23.1, batch.total_ms) + assert.equals(1200, batch.mem_delta_kb) + + -- Complete entry + assert.equals(156.8, data[c].entries[3].payload.total_ms) + end) + + it('handles nil payload gracefully', function() + local c = cat('nil_payload') + emit.profile('start', c, 'started') + emit.profile('mark', c, 'no payload') + + local data = Profiler.get_data() + assert.equals(2, #data[c].entries) + assert.is_nil(data[c].entries[1].payload) + assert.is_nil(data[c].entries[2].payload) + end) + + it('handles empty table payload', function() + local c = cat('empty_payload') + emit.profile('start', c, 'started', {}) + + local data = Profiler.get_data() + assert.is_not_nil(data[c].entries[1].payload) + assert.are.same({}, data[c].entries[1].payload) + end) + end) + + describe('event ordering', function() + it('captures events in emission order', function() + local c = cat('order') + emit.profile('start', c, 'first') + emit.profile('mark', c, 'second') + emit.profile('mark', c, 'third') + emit.profile('complete', c, 'fourth') + + local data = Profiler.get_data() + assert.equals(4, #data[c].entries) + assert.equals('first', data[c].entries[1].label) + assert.equals('second', data[c].entries[2].label) + assert.equals('third', data[c].entries[3].label) + assert.equals('fourth', data[c].entries[4].label) + end) + + it('maintains order across interleaved categories', function() + local c_a = cat('interleave_a') + local c_b = cat('interleave_b') + emit.profile('start', c_a, 'a1') + emit.profile('start', c_b, 'b1') + emit.profile('mark', c_a, 'a2') + emit.profile('mark', c_b, 'b2') + emit.profile('complete', c_a, 'a3') + emit.profile('complete', c_b, 'b3') + + local data = 
Profiler.get_data() + assert.equals('a1', data[c_a].entries[1].label) + assert.equals('a2', data[c_a].entries[2].label) + assert.equals('a3', data[c_a].entries[3].label) + assert.equals('b1', data[c_b].entries[1].label) + assert.equals('b2', data[c_b].entries[2].label) + assert.equals('b3', data[c_b].entries[3].label) + end) + end) + + describe('timing monotonicity', function() + it('total_ms always increases within a session', function() + local c = cat('monotonic') + emit.profile('start', c, 'start') + vim.wait(5) -- small delay to ensure measurable time passes + emit.profile('mark', c, 'mark1') + vim.wait(5) + emit.profile('mark', c, 'mark2') + vim.wait(5) + emit.profile('complete', c, 'end') + + local entries = Profiler.get_data()[c].entries + for i = 2, #entries do + assert.is_true( + entries[i].total_ms >= entries[i - 1].total_ms, + string.format( + 'Entry %d total_ms (%.2f) should be >= entry %d total_ms (%.2f)', + i, + entries[i].total_ms, + i - 1, + entries[i - 1].total_ms + ) + ) + end + end) + + it('first entry has total_ms of 0', function() + local c = cat('first_zero') + emit.profile('start', c, 'start') + + local entries = Profiler.get_data()[c].entries + assert.equals(0, entries[1].total_ms) + end) + end) + + describe('delta calculation', function() + it('delta_ms equals difference between consecutive total_ms', function() + local c = cat('delta') + emit.profile('start', c, 'start') + vim.wait(10) + emit.profile('mark', c, 'mark1') + vim.wait(15) + emit.profile('complete', c, 'end') + + local entries = Profiler.get_data()[c].entries + for i = 2, #entries do + local expected_delta = entries[i].total_ms - entries[i - 1].total_ms + -- Allow small floating point tolerance + assert.is_true( + math.abs(entries[i].delta_ms - expected_delta) < 0.001, + string.format('Entry %d delta_ms mismatch: got %.3f, expected %.3f', i, entries[i].delta_ms, expected_delta) + ) + end + end) + + it('first entry has delta_ms of 0', function() + local c = cat('delta_first') + 
emit.profile('start', c, 'start') + + local entries = Profiler.get_data()[c].entries + assert.equals(0, entries[1].delta_ms) + end) + end) + + describe('section isolation', function() + it('isolates categories into separate sessions', function() + local c_init = cat('iso_init') + local c_files = cat('iso_files') + emit.profile('start', c_init, 'init start') + emit.profile('start', c_files, 'files start') + emit.profile('mark', c_init, 'init mark') + emit.profile('mark', c_files, 'files mark') + + local data = Profiler.get_data() + assert.equals(2, #data[c_init].entries) + assert.equals(2, #data[c_files].entries) + assert.equals('init mark', data[c_init].entries[2].label) + assert.equals('files mark', data[c_files].entries[2].label) + end) + + it('each category has independent timing', function() + local c_fast = cat('timing_fast') + local c_slow = cat('timing_slow') + emit.profile('start', c_fast, 'start') + emit.profile('complete', c_fast, 'end') + + vim.wait(50) -- delay before starting second category + + emit.profile('start', c_slow, 'start') + emit.profile('complete', c_slow, 'end') + + local data = Profiler.get_data() + -- Both should start at 0, regardless of wall clock + assert.equals(0, data[c_fast].entries[1].total_ms) + assert.equals(0, data[c_slow].entries[1].total_ms) + end) + end) + + describe('disabled state', function() + it('does not capture events when disabled', function() + local c = cat('disabled') + Profiler.setup({ enabled = false }) + + emit.profile('start', c, 'start') + emit.profile('mark', c, 'mark') + emit.profile('complete', c, 'end') + + local data = Profiler.get_data() + assert.is_nil(data[c]) + end) + end) +end)