Diffstat (limited to 'src/profiler.c'):
-rw-r--r--  src/profiler.c | 145
1 file changed, 79 insertions(+), 66 deletions(-)
diff --git a/src/profiler.c b/src/profiler.c
index 8247b2e90c6..6217071ef9c 100644
--- a/src/profiler.c
+++ b/src/profiler.c
@@ -49,7 +49,13 @@ static const struct hash_table_test hashtest_profiler =
hashfn_profiler,
};
-static Lisp_Object
+struct profiler_log {
+ Lisp_Object log;
+ EMACS_INT gc_count; /* Samples taken during GC. */
+ EMACS_INT discarded; /* Samples evicted during table overflow. */
+};
+
+static struct profiler_log
make_log (void)
{
/* We use a standard Elisp hash-table object, but we use it in
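The new struct bundles each profiler's hash-table log with its two
overflow counters, so helpers can update all of the state through one
pointer instead of three separate globals.  A minimal sketch of the
idea (reset_log is a hypothetical helper, not part of the patch):

    static void
    reset_log (struct profiler_log *plog)
    {
      plog->log = Qnil;    /* the Elisp hash table itself */
      plog->gc_count = 0;  /* samples attributed to GC */
      plog->discarded = 0; /* samples lost to eviction */
    }
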
@@ -60,11 +66,13 @@ make_log (void)
= clip_to_bounds (0, profiler_log_size, MOST_POSITIVE_FIXNUM);
ptrdiff_t max_stack_depth
= clip_to_bounds (0, profiler_max_stack_depth, PTRDIFF_MAX);
- Lisp_Object log = make_hash_table (hashtest_profiler, heap_size,
- DEFAULT_REHASH_SIZE,
- DEFAULT_REHASH_THRESHOLD,
- Qnil, false);
- struct Lisp_Hash_Table *h = XHASH_TABLE (log);
+ struct profiler_log log
+ = { make_hash_table (hashtest_profiler, heap_size,
+ DEFAULT_REHASH_SIZE,
+ DEFAULT_REHASH_THRESHOLD,
+ Qnil, false),
+ 0, 0 };
+ struct Lisp_Hash_Table *h = XHASH_TABLE (log.log);
/* What is special about our hash-tables is that the values are pre-filled
with the vectors we'll use as keys. */
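The pre-filling trick mentioned above lets record_backtrace obtain a
key vector without allocating inside a signal handler: every value
slot starts out holding a vector of max_stack_depth nils that can
later be moved into the key slot.  A sketch of the fill loop (the
actual loop is below this hunk; make_nil_vector and
set_hash_value_slot are the lisp.h helpers this assumes):

    for (ptrdiff_t i = 0; i < heap_size; i++)
      set_hash_value_slot (h, i, make_nil_vector (max_stack_depth));
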
@@ -116,8 +124,9 @@ static EMACS_INT approximate_median (log_t *log,
}
}
-static void evict_lower_half (log_t *log)
+static void evict_lower_half (struct profiler_log *plog)
{
+ log_t *log = XHASH_TABLE (plog->log);
ptrdiff_t size = ASIZE (log->key_and_value) / 2;
EMACS_INT median = approximate_median (log, 0, size);
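approximate_median (body elided above) recursively splits the range
into thirds, computes the median of each third, and merges the three
results with a median-of-three step.  An illustrative version of that
merging step (a sketch, not the patch's code):

    static EMACS_INT
    median3 (EMACS_INT a, EMACS_INT b, EMACS_INT c)
    {
      return (a < b
              ? (b < c ? b : (a < c ? c : a))
              : (a < c ? a : (b < c ? c : b)));
    }
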
@@ -127,6 +136,8 @@ static void evict_lower_half (log_t *log)
if (XFIXNUM (HASH_VALUE (log, i)) <= median)
{
Lisp_Object key = HASH_KEY (log, i);
+ EMACS_INT count = XFIXNUM (HASH_VALUE (log, i));
+ plog->discarded = saturated_add (plog->discarded, count);
{ /* FIXME: we could make this more efficient. */
Lisp_Object tmp;
XSET_HASH_TABLE (tmp, log); /* FIXME: Use make_lisp_ptr. */
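saturated_add clips its result at MOST_POSITIVE_FIXNUM, so the
discarded counter degrades gracefully instead of wrapping around.
Roughly equivalent logic (a sketch, not lisp.h's exact definition;
INT_ADD_WRAPV is gnulib's overflow-checking addition):

    static EMACS_INT
    saturated_add_sketch (EMACS_INT a, EMACS_INT b)
    {
      EMACS_INT sum;
      if (INT_ADD_WRAPV (a, b, &sum) || MOST_POSITIVE_FIXNUM < sum)
        return MOST_POSITIVE_FIXNUM;
      return sum;
    }
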
@@ -148,12 +159,12 @@ static void evict_lower_half (log_t *log)
size for memory. */
static void
-record_backtrace (log_t *log, EMACS_INT count)
+record_backtrace (struct profiler_log *plog, EMACS_INT count)
{
+ eassert (HASH_TABLE_P (plog->log));
+ log_t *log = XHASH_TABLE (plog->log);
if (log->next_free < 0)
- /* FIXME: transfer the evicted counts to a special entry rather
- than dropping them on the floor. */
- evict_lower_half (log);
+ evict_lower_half (plog);
ptrdiff_t index = log->next_free;
/* Get a "working memory" vector. */
@@ -222,10 +233,10 @@ static enum profiler_cpu_running
profiler_cpu_running;
/* Hash-table log of CPU profiler. */
-static Lisp_Object cpu_log;
+static struct profiler_log cpu;
-/* Separate counter for the time spent in the GC. */
-static EMACS_INT cpu_gc_count;
+/* Hash-table log of memory profiler. */
+static struct profiler_log memory;
/* The current sampling interval in nanoseconds. */
static EMACS_INT current_sampling_interval;
@@ -233,30 +244,34 @@ static EMACS_INT current_sampling_interval;
/* Signal handler for sampling profiler. */
static void
-handle_profiler_signal (int signal)
+add_sample (struct profiler_log *plog, EMACS_INT count)
{
- if (EQ (backtrace_top_function (), QAutomatic_GC))
+ if (EQ (backtrace_top_function (), QAutomatic_GC)) /* bug#60237 */
/* Special case the time-count inside GC because the hash-table
code is not prepared to be used while the GC is running.
More specifically it uses ASIZE at many places where it does
not expect the ARRAY_MARK_FLAG to be set. We could try and
harden the hash-table code, but it doesn't seem worth the
effort. */
- cpu_gc_count = saturated_add (cpu_gc_count, 1);
+ plog->gc_count = saturated_add (plog->gc_count, count);
else
- {
- EMACS_INT count = 1;
+ record_backtrace (plog, count);
+}
+
+
+static void
+handle_profiler_signal (int signal)
+{
+ EMACS_INT count = 1;
#if defined HAVE_ITIMERSPEC && defined HAVE_TIMER_GETOVERRUN
- if (profiler_timer_ok)
- {
- int overruns = timer_getoverrun (profiler_timer);
- eassert (overruns >= 0);
- count += overruns;
- }
-#endif
- eassert (HASH_TABLE_P (cpu_log));
- record_backtrace (XHASH_TABLE (cpu_log), count);
+ if (profiler_timer_ok)
+ {
+ int overruns = timer_getoverrun (profiler_timer);
+ eassert (overruns >= 0);
+ count += overruns;
}
+#endif
+ add_sample (&cpu, count);
}
static void
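Factoring add_sample out lets both profilers share the GC special
case while weighting samples differently.  Illustrative call sites
(the second mirrors malloc_probe further down):

    add_sample (&cpu, 1 + overruns);  /* CPU: timer ticks */
    add_sample (&memory, min (size, MOST_POSITIVE_FIXNUM)); /* bytes */
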
@@ -343,11 +358,8 @@ See also `profiler-log-size' and `profiler-max-stack-depth'. */)
if (profiler_cpu_running)
error ("CPU profiler is already running");
- if (NILP (cpu_log))
- {
- cpu_gc_count = 0;
- cpu_log = make_log ();
- }
+ if (NILP (cpu.log))
+ cpu = make_log ();
int status = setup_cpu_timer (sampling_interval);
if (status < 0)
@@ -409,6 +421,26 @@ DEFUN ("profiler-cpu-running-p",
return profiler_cpu_running ? Qt : Qnil;
}
+static Lisp_Object
+export_log (struct profiler_log *log)
+{
+ Lisp_Object result = log->log;
+ if (log->gc_count)
+ Fputhash (CALLN (Fvector, QAutomatic_GC, Qnil),
+ make_fixnum (log->gc_count),
+ result);
+ if (log->discarded)
+ Fputhash (CALLN (Fvector, QDiscarded_Samples, Qnil),
+ make_fixnum (log->discarded),
+ result);
+ /* Here we're making the log visible to Elisp, so it's no longer
+ safe for our own use afterwards, since we can't rely on its special
+ pre-allocated keys.  So we have to allocate a new one. */
+ if (profiler_cpu_running)
+ *log = make_log ();
+ return result;
+}
+
DEFUN ("profiler-cpu-log", Fprofiler_cpu_log, Sprofiler_cpu_log,
0, 0, 0,
doc: /* Return the current cpu profiler log.
@@ -418,16 +450,7 @@ of functions, where the last few elements may be nil.
Before returning, a new log is allocated for future samples. */)
(void)
{
- Lisp_Object result = cpu_log;
- /* Here we're making the log visible to Elisp, so it's not safe any
- more for our use afterwards since we can't rely on its special
- pre-allocated keys anymore. So we have to allocate a new one. */
- cpu_log = profiler_cpu_running ? make_log () : Qnil;
- Fputhash (make_vector (1, QAutomatic_GC),
- make_fixnum (cpu_gc_count),
- result);
- cpu_gc_count = 0;
- return result;
+ return export_log (&cpu);
}
#endif /* PROFILER_CPU_SUPPORT */
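The two synthetic entries that export_log adds use short "backtrace"
vectors (a frame plus a nil) as keys, so a consumer can look them up
like any other entry.  A hedged sketch of reading them back (exported
is a hypothetical variable holding the value returned by the DEFUN
above):

    Lisp_Object key = CALLN (Fvector, QAutomatic_GC, Qnil);
    Lisp_Object gc_samples = Fgethash (key, exported, make_fixnum (0));
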
@@ -436,8 +459,6 @@ Before returning, a new log is allocated for future samples. */)
/* True if memory profiler is running. */
bool profiler_memory_running;
-static Lisp_Object memory_log;
-
DEFUN ("profiler-memory-start", Fprofiler_memory_start, Sprofiler_memory_start,
0, 0, 0,
doc: /* Start/restart the memory profiler.
@@ -450,8 +471,8 @@ See also `profiler-log-size' and `profiler-max-stack-depth'. */)
if (profiler_memory_running)
error ("Memory profiler is already running");
- if (NILP (memory_log))
- memory_log = make_log ();
+ if (NILP (memory.log))
+ memory = make_log ();
profiler_memory_running = true;
@@ -490,12 +511,7 @@ of functions, where the last few elements may be nil.
Before returning, a new log is allocated for future samples. */)
(void)
{
- Lisp_Object result = memory_log;
- /* Here we're making the log visible to Elisp , so it's not safe any
- more for our use afterwards since we can't rely on its special
- pre-allocated keys anymore. So we have to allocate a new one. */
- memory_log = profiler_memory_running ? make_log () : Qnil;
- return result;
+ return export_log (&memory);
}
@@ -505,11 +521,7 @@ Before returning, a new log is allocated for future samples. */)
void
malloc_probe (size_t size)
{
- if (EQ (backtrace_top_function (), QAutomatic_GC)) /* bug#60237 */
- /* FIXME: We should do something like what we did with `cpu_gc_count`. */
- return;
- eassert (HASH_TABLE_P (memory_log));
- record_backtrace (XHASH_TABLE (memory_log), min (size, MOST_POSITIVE_FIXNUM));
+ add_sample (&memory, min (size, MOST_POSITIVE_FIXNUM));
}
DEFUN ("function-equal", Ffunction_equal, Sfunction_equal, 2, 2, 0,
@@ -589,21 +601,22 @@ to make room for new entries. */);
profiler_log_size = 10000;
DEFSYM (Qprofiler_backtrace_equal, "profiler-backtrace-equal");
+ DEFSYM (QDiscarded_Samples, "Discarded Samples");
defsubr (&Sfunction_equal);
#ifdef PROFILER_CPU_SUPPORT
profiler_cpu_running = NOT_RUNNING;
- cpu_log = Qnil;
- staticpro (&cpu_log);
+ cpu.log = Qnil;
+ staticpro (&cpu.log);
defsubr (&Sprofiler_cpu_start);
defsubr (&Sprofiler_cpu_stop);
defsubr (&Sprofiler_cpu_running_p);
defsubr (&Sprofiler_cpu_log);
#endif
profiler_memory_running = false;
- memory_log = Qnil;
- staticpro (&memory_log);
+ memory.log = Qnil;
+ staticpro (&memory.log);
defsubr (&Sprofiler_memory_start);
defsubr (&Sprofiler_memory_stop);
defsubr (&Sprofiler_memory_running_p);
@@ -618,16 +631,16 @@ syms_of_profiler_for_pdumper (void)
if (dumped_with_pdumper_p ())
{
#ifdef PROFILER_CPU_SUPPORT
- cpu_log = Qnil;
+ cpu.log = Qnil;
#endif
- memory_log = Qnil;
+ memory.log = Qnil;
}
else
{
#ifdef PROFILER_CPU_SUPPORT
- eassert (NILP (cpu_log));
+ eassert (NILP (cpu.log));
#endif
- eassert (NILP (memory_log));
+ eassert (NILP (memory.log));
}
}