trace.h: support nested performance tracing
Performance measurements are currently reported as a flat list, which is fine while we only measure big blocks. But as more and more measurements are added, some of them will only be part of a bigger measurement, and a flat list gives the wrong impression that they are executed at the same level rather than nested.

Add trace_performance_enter() and trace_performance_leave() to allow indenting these nested measurements. For now this does not help much, because the only nested measurement is (lazy) name hash initialization (e.g. called in diff-index from "git status"). It will help more once additional tracing that is actually nested is added.

Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
commit c46c406ae1
parent fa03cdc39b
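As an illustration of how callers are expected to use the new pair (not part of the patch; do_outer_work() and do_inner_work() are made-up helpers), a region is bracketed by trace_performance_enter() and trace_performance_leave(), and any region entered while another is still open is reported one indent level deeper:

	/* Illustrative sketch only; just the trace_performance_* calls come from this patch. */
	static void do_inner_work(void)
	{
		trace_performance_enter();
		/* ... nested work being measured ... */
		trace_performance_leave("inner work");
	}

	static void do_outer_work(void)
	{
		trace_performance_enter();
		do_inner_work();	/* reported one level deeper */
		trace_performance_leave("outer work");
	}

With GIT_TRACE_PERFORMANCE enabled, the nested region then shows up indented under its parent, roughly like:

	performance: 0.000001234 s:  inner work
	performance: 0.000005678 s: outer work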
					
--- a/diff-lib.c
+++ b/diff-lib.c
@@ -518,11 +518,11 @@ static int diff_cache(struct rev_info *revs,
 int run_diff_index(struct rev_info *revs, int cached)
 {
 	struct object_array_entry *ent;
-	uint64_t start = getnanotime();
 
 	if (revs->pending.nr != 1)
 		BUG("run_diff_index must be passed exactly one tree");
 
+	trace_performance_enter();
 	ent = revs->pending.objects;
 	if (diff_cache(revs, &ent->item->oid, ent->name, cached))
 		exit(128);
@@ -531,7 +531,7 @@ int run_diff_index(struct rev_info *revs, int cached)
 	diffcore_fix_diff_index(&revs->diffopt);
 	diffcore_std(&revs->diffopt);
 	diff_flush(&revs->diffopt);
-	trace_performance_since(start, "diff-index");
+	trace_performance_leave("diff-index");
 	return 0;
 }
 
--- a/dir.c
+++ b/dir.c
@@ -2263,10 +2263,13 @@ int read_directory(struct dir_struct *dir, struct index_state *istate,
 		   const char *path, int len, const struct pathspec *pathspec)
 {
 	struct untracked_cache_dir *untracked;
-	uint64_t start = getnanotime();
 
-	if (has_symlink_leading_path(path, len))
+	trace_performance_enter();
+
+	if (has_symlink_leading_path(path, len)) {
+		trace_performance_leave("read directory %.*s", len, path);
 		return dir->nr;
+	}
 
 	untracked = validate_untracked_cache(dir, len, pathspec);
 	if (!untracked)
@@ -2302,7 +2305,7 @@ int read_directory(struct dir_struct *dir, struct index_state *istate,
 		dir->nr = i;
 	}
 
-	trace_performance_since(start, "read directory %.*s", len, path);
+	trace_performance_leave("read directory %.*s", len, path);
 	if (dir->untracked) {
 		static int force_untracked_cache = -1;
 		static struct trace_key trace_untracked_stats = TRACE_KEY_INIT(UNTRACKED_STATS);
 
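Note, on the read_directory() hunk above: because enter/leave are paired purely by call order, every exit path out of a measured region needs its own trace_performance_leave(), which is why the early-return branch gains one. A hedged sketch of that discipline (check_precondition() and the labels are hypothetical, not from this patch):

	static int measured_fn(void)
	{
		trace_performance_enter();
		if (check_precondition()) {
			/* leave on the early exit too, so the depth stays balanced */
			trace_performance_leave("measured_fn (early exit)");
			return 0;
		}
		/* ... main work ... */
		trace_performance_leave("measured_fn");
		return 1;
	}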
--- a/name-hash.c
+++ b/name-hash.c
@@ -578,10 +578,10 @@ static void threaded_lazy_init_name_hash(
 
 static void lazy_init_name_hash(struct index_state *istate)
 {
-	uint64_t start = getnanotime();
 
 	if (istate->name_hash_initialized)
 		return;
+	trace_performance_enter();
 	hashmap_init(&istate->name_hash, cache_entry_cmp, NULL, istate->cache_nr);
 	hashmap_init(&istate->dir_hash, dir_entry_cmp, NULL, istate->cache_nr);
 
@@ -602,7 +602,7 @@ static void lazy_init_name_hash(struct index_state *istate)
 	}
 
 	istate->name_hash_initialized = 1;
-	trace_performance_since(start, "initialize name hash");
+	trace_performance_leave("initialize name hash");
 }
 
 /*
--- a/preload-index.c
+++ b/preload-index.c
@@ -78,7 +78,6 @@ static void preload_index(struct index_state *index,
 {
 	int threads, i, work, offset;
 	struct thread_data data[MAX_PARALLEL];
-	uint64_t start = getnanotime();
 
 	if (!core_preload_index)
 		return;
@@ -88,6 +87,7 @@ static void preload_index(struct index_state *index,
 		threads = 2;
 	if (threads < 2)
 		return;
+	trace_performance_enter();
 	if (threads > MAX_PARALLEL)
 		threads = MAX_PARALLEL;
 	offset = 0;
@@ -109,7 +109,7 @@ static void preload_index(struct index_state *index,
 		if (pthread_join(p->pthread, NULL))
 			die("unable to join threaded lstat");
 	}
-	trace_performance_since(start, "preload index");
+	trace_performance_leave("preload index");
 }
 #endif
 
--- a/read-cache.c
+++ b/read-cache.c
@@ -1476,8 +1476,8 @@ int refresh_index(struct index_state *istate, unsigned int flags,
 	const char *typechange_fmt;
 	const char *added_fmt;
 	const char *unmerged_fmt;
-	uint64_t start = getnanotime();
 
+	trace_performance_enter();
 	modified_fmt = (in_porcelain ? "M\t%s\n" : "%s: needs update\n");
 	deleted_fmt = (in_porcelain ? "D\t%s\n" : "%s: needs update\n");
 	typechange_fmt = (in_porcelain ? "T\t%s\n" : "%s needs update\n");
@@ -1547,7 +1547,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
 
 		replace_index_entry(istate, i, new_entry);
 	}
-	trace_performance_since(start, "refresh index");
+	trace_performance_leave("refresh index");
 	return has_errors;
 }
 
@@ -2002,7 +2002,6 @@ static void freshen_shared_index(const char *shared_index, int warn)
 int read_index_from(struct index_state *istate, const char *path,
 		    const char *gitdir)
 {
-	uint64_t start = getnanotime();
 	struct split_index *split_index;
 	int ret;
 	char *base_oid_hex;
@@ -2012,8 +2011,9 @@ int read_index_from(struct index_state *istate, const char *path,
 	if (istate->initialized)
 		return istate->cache_nr;
 
+	trace_performance_enter();
 	ret = do_read_index(istate, path, 0);
-	trace_performance_since(start, "read cache %s", path);
+	trace_performance_leave("read cache %s", path);
 
 	split_index = istate->split_index;
 	if (!split_index || is_null_oid(&split_index->base_oid)) {
@@ -2021,6 +2021,7 @@ int read_index_from(struct index_state *istate, const char *path,
 		return ret;
 	}
 
+	trace_performance_enter();
 	if (split_index->base)
 		discard_index(split_index->base);
 	else
@@ -2037,8 +2038,8 @@ int read_index_from(struct index_state *istate, const char *path,
 	freshen_shared_index(base_path, 0);
 	merge_base_index(istate);
 	post_read_index_from(istate);
-	trace_performance_since(start, "read cache %s", base_path);
 	free(base_path);
+	trace_performance_leave("read cache %s", base_path);
 	return ret;
 }
 
--- a/trace.c
+++ b/trace.c
@@ -176,10 +176,30 @@ void trace_strbuf_fl(const char *file, int line, struct trace_key *key,
 	strbuf_release(&buf);
 }
 
+static uint64_t perf_start_times[10];
+static int perf_indent;
+
+uint64_t trace_performance_enter(void)
+{
+	uint64_t now;
+
+	if (!trace_want(&trace_perf_key))
+		return 0;
+
+	now = getnanotime();
+	perf_start_times[perf_indent] = now;
+	if (perf_indent + 1 < ARRAY_SIZE(perf_start_times))
+		perf_indent++;
+	else
+		BUG("Too deep indentation");
+	return now;
+}
+
 static void trace_performance_vprintf_fl(const char *file, int line,
 					 uint64_t nanos, const char *format,
 					 va_list ap)
 {
+	static const char space[] = "          ";
 	struct strbuf buf = STRBUF_INIT;
 
 	if (!prepare_trace_line(file, line, &trace_perf_key, &buf))
@@ -188,7 +208,10 @@ static void trace_performance_vprintf_fl(const char *file, int line,
 	strbuf_addf(&buf, "performance: %.9f s", (double) nanos / 1000000000);
 
 	if (format && *format) {
-		strbuf_addstr(&buf, ": ");
+		if (perf_indent >= strlen(space))
+			BUG("Too deep indentation");
+
+		strbuf_addf(&buf, ":%.*s ", perf_indent, space);
 		strbuf_vaddf(&buf, format, ap);
 	}
 
@@ -244,6 +267,24 @@ void trace_performance_since(uint64_t start, const char *format, ...)
 	va_end(ap);
 }
 
+void trace_performance_leave(const char *format, ...)
+{
+	va_list ap;
+	uint64_t since;
+
+	if (perf_indent)
+		perf_indent--;
+
+	if (!format) /* Allow callers to leave without tracing anything */
+		return;
+
+	since = perf_start_times[perf_indent];
+	va_start(ap, format);
+	trace_performance_vprintf_fl(NULL, 0, getnanotime() - since,
+				     format, ap);
+	va_end(ap);
+}
+
 #else
 
 void trace_printf_key_fl(const char *file, int line, struct trace_key *key,
@@ -273,6 +314,24 @@ void trace_performance_fl(const char *file, int line, uint64_t nanos,
 	va_end(ap);
 }
 
+void trace_performance_leave_fl(const char *file, int line,
+				uint64_t nanos, const char *format, ...)
+{
+	va_list ap;
+	uint64_t since;
+
+	if (perf_indent)
+		perf_indent--;
+
+	if (!format) /* Allow callers to leave without tracing anything */
+		return;
+
+	since = perf_start_times[perf_indent];
+	va_start(ap, format);
+	trace_performance_vprintf_fl(file, line, nanos - since, format, ap);
+	va_end(ap);
+}
+
 #endif /* HAVE_VARIADIC_MACROS */
 
 
@@ -411,13 +470,11 @@ uint64_t getnanotime(void)
 	}
 }
 
-static uint64_t command_start_time;
 static struct strbuf command_line = STRBUF_INIT;
 
 static void print_command_performance_atexit(void)
 {
-	trace_performance_since(command_start_time, "git command:%s",
-				command_line.buf);
+	trace_performance_leave("git command:%s", command_line.buf);
 }
 
 void trace_command_performance(const char **argv)
@@ -425,10 +482,10 @@ void trace_command_performance(const char **argv)
 	if (!trace_want(&trace_perf_key))
 		return;
 
-	if (!command_start_time)
+	if (!command_line.len)
 		atexit(print_command_performance_atexit);
 
 	strbuf_reset(&command_line);
 	sq_quote_argv_pretty(&command_line, argv);
-	command_start_time = getnanotime();
+	trace_performance_enter();
 }
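The bookkeeping above is a small fixed-depth stack: trace_performance_enter() records getnanotime() in perf_start_times[] and bumps perf_indent (BUG()ing if nesting goes deeper than the array), and trace_performance_leave() pops the matching start time and prints the elapsed time indented by the remaining depth. A simplified, self-contained model of that logic (plain C, not the actual git code) might look like:

	#include <stdio.h>
	#include <stdint.h>
	#include <time.h>

	#define MAX_DEPTH 10

	static uint64_t start_times[MAX_DEPTH];
	static int depth;

	static uint64_t now_ns(void)	/* stand-in for getnanotime() */
	{
		struct timespec ts;
		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000000000u + ts.tv_nsec;
	}

	static void perf_enter(void)
	{
		start_times[depth] = now_ns();
		if (depth + 1 < MAX_DEPTH)
			depth++;	/* the real code BUG()s instead of silently capping */
	}

	static void perf_leave(const char *label)
	{
		uint64_t elapsed;

		if (depth)
			depth--;
		elapsed = now_ns() - start_times[depth];
		/* indent by the remaining depth, mirroring ":%.*s " in trace.c */
		printf("performance: %.9f s:%*s %s\n",
		       (double)elapsed / 1000000000, depth, "", label);
	}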
|  |  | ||||||
							
								
								
									
--- a/trace.h
+++ b/trace.h
@@ -23,6 +23,7 @@ extern void trace_disable(struct trace_key *key);
 extern uint64_t getnanotime(void);
 extern void trace_command_performance(const char **argv);
 extern void trace_verbatim(struct trace_key *key, const void *buf, unsigned len);
+uint64_t trace_performance_enter(void);
 
 #ifndef HAVE_VARIADIC_MACROS
 
@@ -45,6 +46,9 @@ extern void trace_performance(uint64_t nanos, const char *format, ...);
 __attribute__((format (printf, 2, 3)))
 extern void trace_performance_since(uint64_t start, const char *format, ...);
 
+__attribute__((format (printf, 1, 2)))
+void trace_performance_leave(const char *format, ...);
+
 #else
 
 /*
@@ -118,6 +122,14 @@ extern void trace_performance_since(uint64_t start, const char *format, ...);
 					     __VA_ARGS__);		    \
 	} while (0)
 
+#define trace_performance_leave(...)					    \
+	do {								    \
+		if (trace_pass_fl(&trace_perf_key))			    \
+			trace_performance_leave_fl(TRACE_CONTEXT, __LINE__, \
+						   getnanotime(),	    \
+						   __VA_ARGS__);	    \
+	} while (0)
+
 /* backend functions, use non-*fl macros instead */
 __attribute__((format (printf, 4, 5)))
 extern void trace_printf_key_fl(const char *file, int line, struct trace_key *key,
@@ -130,6 +142,9 @@ extern void trace_strbuf_fl(const char *file, int line, struct trace_key *key,
 __attribute__((format (printf, 4, 5)))
 extern void trace_performance_fl(const char *file, int line,
 				 uint64_t nanos, const char *fmt, ...);
+__attribute__((format (printf, 4, 5)))
+extern void trace_performance_leave_fl(const char *file, int line,
+				       uint64_t nanos, const char *fmt, ...);
 static inline int trace_pass_fl(struct trace_key *key)
 {
 	return key->fd || !key->initialized;