scran_markers
Marker detection for single-cell data
score_markers_summary.hpp
1#ifndef SCRAN_SCORE_MARKERS_HPP
2#define SCRAN_SCORE_MARKERS_HPP
3
4#include "scan_matrix.hpp"
5#include "cohens_d.hpp"
6#include "simple_diff.hpp"
7#include "summarize_effects.hpp"
8#include "average_group_stats.hpp"
9#include "create_combinations.hpp"
10
11#include "scran_blocks/scran_blocks.hpp"
12#include "tatami/tatami.hpp"
13
14#include <array>
15#include <map>
16#include <vector>
17
23namespace scran_markers {
24
28struct ScoreMarkersSummaryOptions {
33 double threshold = 0;
34
39 int num_threads = 1;
40
45 int cache_size = 100;
46
51 bool compute_cohens_d = true;
52
57 bool compute_auc = true;
58
63 bool compute_delta_mean = true;
64
69 bool compute_delta_detected = true;
70
75 bool compute_min = true;
76
81 bool compute_mean = true;
82
87 bool compute_median = true;
88
93 bool compute_max = true;
94
99 bool compute_min_rank = true;
100
104 scran_blocks::WeightPolicy block_weight_policy = scran_blocks::WeightPolicy::VARIABLE;
105
110 scran_blocks::VariableWeightParameters variable_block_weight_parameters;
111};
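The defaults above can be overridden before scoring. A minimal configuration sketch follows (not part of the original header; the values are purely illustrative, and only member names declared in the struct above are used):

#include "score_markers_summary.hpp"

// Illustrative only: require a minimum difference between groups, skip the AUC
// summaries, and use multiple threads.
inline scran_markers::ScoreMarkersSummaryOptions make_marker_options() {
    scran_markers::ScoreMarkersSummaryOptions opt;
    opt.threshold = 0.5;     // e.g., a log-fold change threshold for log-expression input.
    opt.compute_auc = false; // AUCs are typically the most expensive effect size to compute.
    opt.num_threads = 4;
    return opt;
}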
112
118template<typename Stat_, typename Rank_>
119struct ScoreMarkersSummaryBuffers {
125 std::vector<Stat_*> mean;
126
132 std::vector<Stat_*> detected;
133
141 std::vector<SummaryBuffers<Stat_, Rank_> > cohens_d;
142
150 std::vector<SummaryBuffers<Stat_, Rank_> > auc;
151
159 std::vector<SummaryBuffers<Stat_, Rank_> > delta_mean;
160
168 std::vector<SummaryBuffers<Stat_, Rank_> > delta_detected;
169};
170
176template<typename Stat_, typename Rank_>
177struct ScoreMarkersSummaryResults {
182 std::vector<std::vector<Stat_> > mean;
183
188 std::vector<std::vector<Stat_> > detected;
189
197 std::vector<SummaryResults<Stat_, Rank_> > cohens_d;
198
206 std::vector<SummaryResults<Stat_, Rank_> > auc;
207
215 std::vector<SummaryResults<Stat_, Rank_> > delta_mean;
216
224 std::vector<SummaryResults<Stat_, Rank_> > delta_detected;
225};
226
230namespace internal {
231
232enum class CacheAction : unsigned char { SKIP, COMPUTE, CACHE };
233
234/*
235 * We compute effect sizes in a pairwise fashion with some nested loops, i.e.,
236 * iterating over g1 in [1, G) and then over g2 in [0, g1). When we compute the
237 * effect size for g1 vs g2, we sometimes get a free effect size for g2 vs g1,
238 * which we call the "reverse effect". However, we can't use the reverse effect
239 * size until we get around to summarizing effects for g2, hence the caching.
240 *
241 * This cache tries to store as many of the reverse effects as possible before
242 * it starts evicting. Evictions are based on the principle that it is better
243 * to store effects that will be re-used quickly, thus freeing up the cache for
244 * future stores. The 'speed' of reusability of each cache entry depends on the
245 * first group in the comparison corresponding to each cached effect size; the
246 * smaller the first group, the sooner it will be reached when iterating across
247 * groups in score_markers_summary().
248 *
249 * So, the policy is to evict cache entries when the identity of the first
250 * group in the cached entry is larger than the identity of the first group of
251 * the incoming entry. Given that we have to throw away one of these effects
252 * anyway when the cache is full, we prefer to hold onto the one that will be
253 * used (and thus freed up) soonest.
254 */
255template<typename Stat_>
256class EffectsCacher {
257public:
258 EffectsCacher(size_t ngenes, size_t ngroups, size_t cache_size) :
259 my_ngenes(ngenes),
260 my_ngroups(ngroups),
261 my_cache_size(std::min(cache_size, ngroups * (ngroups - 1) / 2)), // cap it at the maximum possible number of comparisons.
262 my_actions(ngroups),
263 my_common_cache(ngenes * my_cache_size),
264 my_staging_cache(ngroups)
265 {
266 my_unused_pool.reserve(my_cache_size);
267 auto ptr = my_common_cache.data();
268 for (size_t c = 0; c < my_cache_size; ++c, ptr += ngenes) {
269 my_unused_pool.push_back(ptr);
270 }
271 }
272
273private:
274 size_t my_ngenes;
275 size_t my_ngroups;
276 size_t my_cache_size;
277
278 std::vector<CacheAction> my_actions;
279
280 // 'common_cache' contains a single allocation so that we don't have to do
281 // a lot of fiddling with move constructors, swaps, etc.
282 std::vector<Stat_> my_common_cache;
283
284 // 'staging_cache' contains the set of cached effects in the other
285 // direction, i.e., all other groups compared to the current group. This is
286 // only used to avoid repeated look-ups in 'cached' while filling the
287 // effect size vectors; they will ultimately be transferred to cached after
288 // the processing for the current group is complete.
289 std::vector<Stat_*> my_staging_cache;
290
291 // 'unused_pool' contains the currently-unused set of pointers to free
292 // subarrays in 'my_common_cache'
293 std::vector<Stat_*> my_unused_pool;
294
295 // 'cached' contains the cached effect size vectors from previous groups. Note
296 // that the use of a map is deliberate as we need the sorting.
297 std::map<std::pair<size_t, size_t>, Stat_*> my_cached;
298
299public:
300 void clear() {
301 my_cached.clear();
302 }
303
304public:
305 void fill_effects_from_cache(size_t group, std::vector<Stat_>& full_effects) {
306 // During calculation of effects, the current group (i.e., 'group') is
307 // the first group in the comparison and 'other' is the second group.
308 // However, remember that we want to cache the reverse effects, so in
309 // the cached entry, 'group' is second and 'other' is first.
310 for (size_t other = 0; other < my_ngroups; ++other) {
311 if (other == group) {
312 my_actions[other] = CacheAction::SKIP;
313 continue;
314 }
315
316 if (my_cache_size == 0) {
317 my_actions[other] = CacheAction::COMPUTE;
318 continue;
319 }
320
321 // If 'other' is later than 'group', it's a candidate to be cached,
322 // as it will be used when this method is called with 'group = other'.
323 // Note that the ACTUAL caching decision is made in the refinement step below.
324 if (other > group) {
325 my_actions[other] = CacheAction::CACHE;
326 continue;
327 }
328
329 // Need to recompute cache entries that were previously evicted. We
330 // do so if the cache is empty or the first group of the first cached
331 // entry has a higher index than the current group (and thus the
332 // desired comparison's effects cannot possibly exist in the cache).
333 if (my_cached.empty()) {
334 my_actions[other] = CacheAction::COMPUTE;
335 continue;
336 }
337
338 const auto& front = my_cached.begin()->first;
339 if (front.first > group || front.second > other) {
340 // Technically, the second clause should be (front.first == group && front.second > other).
341 // However, less-thans should be impossible as they should have been used up during processing
342 // of previous 'group' values. Thus, equality is already implied if the first condition fails.
343 my_actions[other] = CacheAction::COMPUTE;
344 continue;
345 }
346
347 // If we got past the previous clause, this implies that the first cache entry
348 // contains the effect sizes for the desired comparison (i.e., 'group' vs 'other').
349 // We thus transfer the cached values to 'full_effects'.
350 my_actions[other] = CacheAction::SKIP;
351 auto curcache = my_cached.begin()->second;
352 size_t offset = other;
353 for (size_t i = 0; i < my_ngenes; ++i, offset += my_ngroups) {
354 full_effects[offset /* = other + ngroups * i */] = curcache[i];
355 }
356
357 my_unused_pool.push_back(curcache);
358 my_cached.erase(my_cached.begin());
359 }
360
361 // Refining our choice of cacheable entries by doing a dummy run and
362 // seeing whether eviction actually happens. If it doesn't, we won't
363 // bother caching, because that would be a waste of memory accesses.
364 for (size_t other = 0; other < my_ngroups; ++other) {
365 if (my_actions[other] != CacheAction::CACHE) {
366 continue;
367 }
368
369 std::pair<size_t, size_t> key(other, group);
370 if (my_cached.size() < my_cache_size) {
371 auto ptr = my_unused_pool.back();
372 my_cached[key] = ptr;
373 my_staging_cache[other] = ptr;
374 my_unused_pool.pop_back();
375 continue;
376 }
377
378 // Looking at the last cache entry. If the first group of this
379 // entry is larger than the first group of the incoming entry, we
380 // evict it, as the incoming entry has faster reusability.
381 auto it = my_cached.end();
382 --it;
383 if ((it->first).first > other) {
384 auto ptr = it->second;
385 my_cached[key] = ptr;
386 my_staging_cache[other] = ptr;
387 my_cached.erase(it);
388 } else {
389 // Otherwise, if we're not going to do any evictions, we
390 // indicate that we shouldn't even bother computing the
391 // reverse effect, because we won't cache the incoming entry.
392 my_actions[other] = CacheAction::COMPUTE;
393 }
394 }
395 }
396
397public:
398 CacheAction get_action(size_t other) const {
399 return my_actions[other];
400 }
401
402 Stat_* get_cache_location(size_t other) const {
403 return my_staging_cache[other];
404 }
405};
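To make the eviction policy above concrete, here is a small standalone sketch (not part of the header) of how the std::map ordering on (first group, second group) keys behaves: the entry that will be consumed soonest sits at begin(), while the entry at the back is the preferred eviction candidate.

#include <cstddef>
#include <iostream>
#include <iterator>
#include <map>
#include <utility>

// Standalone illustration of the ordering used by EffectsCacher::my_cached.
// Keys are (first group, second group) of the cached (reverse) comparison, so
// the entry needed soonest (smallest first group) sorts to the front and the
// entry at the back is the one we would evict first.
int main() {
    std::map<std::pair<std::size_t, std::size_t>, int> cached;
    cached[{3, 0}] = 30; // needed when summarizing group 3.
    cached[{1, 0}] = 10; // needed when summarizing group 1, i.e., soonest.
    cached[{5, 2}] = 52; // needed last, so it is the first to be evicted.

    std::cout << "used next: group " << cached.begin()->first.first << std::endl;
    std::cout << "evicted first: group " << std::prev(cached.end())->first.first << std::endl;
    return 0;
}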
406
407template<typename Stat_, typename Rank_>
408void process_simple_summary_effects(
409 size_t ngenes,
410 size_t ngroups,
411 size_t nblocks,
412 size_t ncombos,
413 const std::vector<Stat_>& combo_means,
414 const std::vector<Stat_>& combo_vars,
415 const std::vector<Stat_>& combo_detected,
416 const ScoreMarkersSummaryBuffers<Stat_, Rank_>& output,
417 const std::vector<Stat_>& combo_weights,
418 double threshold,
419 size_t cache_size,
420 int num_threads)
421{
422 // First, computing the pooled averages to get that out of the way.
423 {
424 std::vector<Stat_> total_weights_per_group;
425 const Stat_* total_weights_ptr = combo_weights.data();
426 if (nblocks > 1) {
427 total_weights_per_group = compute_total_weight_per_group(ngroups, nblocks, combo_weights.data());
428 total_weights_ptr = total_weights_per_group.data();
429 }
430
431 tatami::parallelize([&](size_t, size_t start, size_t length) -> void {
432 size_t in_offset = ncombos * static_cast<size_t>(start);
433 const auto* tmp_means = combo_means.data() + in_offset;
434 const auto* tmp_detected = combo_detected.data() + in_offset;
435 for (size_t gene = start, end = start + length; gene < end; ++gene, tmp_means += ncombos, tmp_detected += ncombos) {
436 average_group_stats(gene, ngroups, nblocks, tmp_means, tmp_detected, combo_weights.data(), total_weights_ptr, output.mean, output.detected);
437 }
438 }, ngenes, num_threads);
439 }
440
441 PrecomputedPairwiseWeights<Stat_> preweights(ngroups, nblocks, combo_weights.data());
442 EffectsCacher<Stat_> cache(ngenes, ngroups, cache_size);
443 std::vector<Stat_> full_effects(ngroups * ngenes);
444 std::vector<std::vector<Stat_> > effect_buffers(num_threads);
445 for (auto& ef : effect_buffers) {
446 ef.resize(ngroups);
447 }
448
449 if (output.cohens_d.size()) {
450 cache.clear();
451 for (size_t group = 0; group < ngroups; ++group) {
452 cache.fill_effects_from_cache(group, full_effects);
453
454 tatami::parallelize([&](size_t t, size_t start, size_t length) -> void {
455 size_t in_offset = ncombos * static_cast<size_t>(start); // cast to avoid overflow.
456 auto my_means = combo_means.data() + in_offset;
457 auto my_variances = combo_vars.data() + in_offset;
458 auto store_ptr = full_effects.data() + static_cast<size_t>(start) * ngroups; // cast to avoid overflow.
459 auto& effect_buffer = effect_buffers[t];
460
461 for (size_t gene = start, end = start + length; gene < end; ++gene, my_means += ncombos, my_variances += ncombos, store_ptr += ngroups) {
462 for (size_t other = 0; other < ngroups; ++other) {
463 auto cache_action = cache.get_action(other);
464 if (cache_action == internal::CacheAction::COMPUTE) {
465 store_ptr[other] = compute_pairwise_cohens_d_one_sided(group, other, my_means, my_variances, ngroups, nblocks, preweights, threshold);
466 } else if (cache_action == internal::CacheAction::CACHE) {
467 auto tmp = compute_pairwise_cohens_d_two_sided(group, other, my_means, my_variances, ngroups, nblocks, preweights, threshold);
468 store_ptr[other] = tmp.first;
469 cache.get_cache_location(other)[gene] = tmp.second;
470 }
471 }
472 summarize_comparisons(ngroups, store_ptr, group, gene, output.cohens_d[group], effect_buffer);
473 }
474 }, ngenes, num_threads);
475
476 auto mr = output.cohens_d[group].min_rank;
477 if (mr) {
478 compute_min_rank_for_group(ngenes, ngroups, group, full_effects.data(), mr, num_threads);
479 }
480 }
481 }
482
483 if (output.delta_mean.size()) {
484 cache.clear();
485 for (size_t group = 0; group < ngroups; ++group) {
486 cache.fill_effects_from_cache(group, full_effects);
487
488 tatami::parallelize([&](size_t t, size_t start, size_t length) -> void {
489 auto my_means = combo_means.data() + ncombos * static_cast<size_t>(start); // cast to size_t to avoid overflow.
490 auto store_ptr = full_effects.data() + static_cast<size_t>(start) * ngroups;
491 auto& effect_buffer = effect_buffers[t];
492
493 for (size_t gene = start, end = start + length; gene < end; ++gene, my_means += ncombos, store_ptr += ngroups) {
494 for (size_t other = 0; other < ngroups; ++other) {
495 auto cache_action = cache.get_action(other);
496 if (cache_action != internal::CacheAction::SKIP) {
497 auto val = compute_pairwise_simple_diff(group, other, my_means, ngroups, nblocks, preweights);
498 store_ptr[other] = val;
499 if (cache_action == CacheAction::CACHE) {
500 cache.get_cache_location(other)[gene] = -val;
501 }
502 }
503 }
504 summarize_comparisons(ngroups, store_ptr, group, gene, output.delta_mean[group], effect_buffer);
505 }
506 }, ngenes, num_threads);
507
508 auto mr = output.delta_mean[group].min_rank;
509 if (mr) {
510 compute_min_rank_for_group(ngenes, ngroups, group, full_effects.data(), mr, num_threads);
511 }
512 }
513 }
514
515 if (output.delta_detected.size()) {
516 cache.clear();
517 for (size_t group = 0; group < ngroups; ++group) {
518 cache.fill_effects_from_cache(group, full_effects);
519
520 tatami::parallelize([&](size_t t, size_t start, size_t length) -> void {
521 auto my_detected = combo_detected.data() + ncombos * static_cast<size_t>(start); // cast to size_t to avoid overflow.
522 auto store_ptr = full_effects.data() + static_cast<size_t>(start) * ngroups;
523 auto& effect_buffer = effect_buffers[t];
524
525 for (size_t gene = start, end = start + length; gene < end; ++gene, my_detected += ncombos, store_ptr += ngroups) {
526 for (size_t other = 0; other < ngroups; ++other) {
527 auto cache_action = cache.get_action(other);
528 if (cache_action != CacheAction::SKIP) {
529 auto val = compute_pairwise_simple_diff(group, other, my_detected, ngroups, nblocks, preweights);
530 store_ptr[other] = val;
531 if (cache_action == CacheAction::CACHE) {
532 cache.get_cache_location(other)[gene] = -val;
533 }
534 }
535 }
536 summarize_comparisons(ngroups, store_ptr, group, gene, output.delta_detected[group], effect_buffer);
537 }
538 }, ngenes, num_threads);
539
540 auto mr = output.delta_detected[group].min_rank;
541 if (mr) {
542 compute_min_rank_for_group(ngenes, ngroups, group, full_effects.data(), mr, num_threads);
543 }
544 }
545 }
546}
547
548template<typename Stat_, typename Rank_>
549ScoreMarkersSummaryBuffers<Stat_, Rank_> fill_summary_results(size_t ngenes, size_t ngroups, ScoreMarkersSummaryResults<Stat_, Rank_>& store, const ScoreMarkersSummaryOptions& options) {
550 ScoreMarkersSummaryBuffers<Stat_, Rank_> output;
551
552 internal::fill_average_results(ngenes, ngroups, store.mean, store.detected, output.mean, output.detected);
553
554 if (options.compute_cohens_d) {
555 output.cohens_d = internal::fill_summary_results(
556 ngenes,
557 ngroups,
558 store.cohens_d,
559 options.compute_min,
560 options.compute_mean,
561 options.compute_median,
562 options.compute_max,
563 options.compute_min_rank
564 );
565 }
566
567 if (options.compute_auc) {
568 output.auc = internal::fill_summary_results(
569 ngenes,
570 ngroups,
571 store.auc,
572 options.compute_min,
573 options.compute_mean,
574 options.compute_median,
575 options.compute_max,
576 options.compute_min_rank
577 );
578 }
579
580 if (options.compute_delta_mean) {
581 output.delta_mean = internal::fill_summary_results(
582 ngenes,
583 ngroups,
584 store.delta_mean,
585 options.compute_min,
586 options.compute_mean,
587 options.compute_median,
588 options.compute_max,
589 options.compute_min_rank
590 );
591 }
592
593 if (options.compute_delta_detected) {
594 output.delta_detected = internal::fill_summary_results(
595 ngenes,
596 ngroups,
597 store.delta_detected,
598 options.compute_min,
599 options.compute_mean,
600 options.compute_median,
601 options.compute_max,
602 options.compute_min_rank
603 );
604 }
605
606 return output;
607}
608
609}
646template<typename Value_, typename Index_, typename Group_, typename Stat_, typename Rank_>
647void score_markers_summary(
648 const tatami::Matrix<Value_, Index_>& matrix,
649 const Group_* group,
650 const ScoreMarkersSummaryOptions& options,
651 const ScoreMarkersSummaryBuffers<Stat_, Rank_>& output)
652{
653 Index_ NC = matrix.ncol();
654 auto group_sizes = tatami_stats::tabulate_groups(group, NC);
655 size_t ngenes = static_cast<size_t>(matrix.nrow());
656 size_t ngroups = group_sizes.size();
657
658 // In most cases this doesn't really matter, but we do it for consistency with the 1-block case,
659 // and to account for variable weighting where non-zero block sizes get zero weight.
660 auto group_weights = scran_blocks::compute_weights<Stat_>(group_sizes, options.block_weight_policy, options.variable_block_weight_parameters);
661
662 size_t payload_size = ngenes * ngroups; // already cast to size_t to avoid overflow.
664
665 bool do_auc = !output.auc.empty();
666 std::vector<Stat_> tmp_auc;
667 Stat_* auc_ptr = NULL;
668 if (do_auc) {
669 tmp_auc.resize(ngroups * ngroups * ngenes);
670 auc_ptr = tmp_auc.data();
671 }
672
673 if (do_auc || matrix.prefer_rows()) {
674 internal::scan_matrix_by_row<true>(
675 matrix,
676 ngroups,
677 group,
678 1,
679 static_cast<int*>(NULL),
680 ngroups,
681 NULL,
685 auc_ptr,
688 options.threshold,
689 options.num_threads
690 );
691
692 } else {
693 internal::scan_matrix_by_column(
694 matrix,
695 ngroups,
696 group,
701 options.num_threads
702 );
703 }
704
705 internal::process_simple_summary_effects(
706 matrix.nrow(),
707 ngroups,
708 1,
709 ngroups,
713 output,
715 options.threshold,
716 options.cache_size,
717 options.num_threads
718 );
719
720 if (do_auc) {
721 internal::summarize_comparisons(ngenes, ngroups, auc_ptr, output.auc, options.num_threads);
722 internal::compute_min_rank_pairwise(ngenes, ngroups, auc_ptr, output.auc, options.num_threads);
723 }
724}
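As a hedged usage sketch for this overload (not from the original header; it assumes tatami's DenseRowMatrix container is available for the input matrix), only the mean and detected buffers are supplied, so all effect-size summaries are skipped:

#include "score_markers_summary.hpp"
#include "tatami/tatami.hpp"
#include <cstddef>
#include <vector>

int main() {
    // Mock log-expression matrix with 100 genes and 20 cells, split into 2 groups.
    int ngenes = 100, ncells = 20;
    std::vector<double> contents(static_cast<std::size_t>(ngenes) * ncells); // fill with real values in practice.
    tatami::DenseRowMatrix<double, int> mat(ngenes, ncells, std::move(contents));

    std::vector<int> group(ncells);
    for (int c = 0; c < ncells; ++c) {
        group[c] = c % 2;
    }
    std::size_t ngroups = 2;

    // Preallocate one mean/detected array per group; the summary vectors are left
    // empty so only the per-group averages are computed.
    std::vector<std::vector<double> > mean_store(ngroups, std::vector<double>(ngenes));
    std::vector<std::vector<double> > detected_store(ngroups, std::vector<double>(ngenes));
    scran_markers::ScoreMarkersSummaryBuffers<double, int> buffers;
    for (std::size_t g = 0; g < ngroups; ++g) {
        buffers.mean.push_back(mean_store[g].data());
        buffers.detected.push_back(detected_store[g].data());
    }

    scran_markers::ScoreMarkersSummaryOptions opt;
    scran_markers::score_markers_summary(mat, group.data(), opt, buffers);
    return 0;
}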
725
753template<typename Value_, typename Index_, typename Group_, typename Block_, typename Stat_, typename Rank_>
754void score_markers_summary_blocked(
755 const tatami::Matrix<Value_, Index_>& matrix,
756 const Group_* group,
757 const Block_* block,
758 const ScoreMarkersSummaryOptions& options,
759 const ScoreMarkersSummaryBuffers<Stat_, Rank_>& output)
760{
761 Index_ NC = matrix.ncol();
762 size_t ngenes = static_cast<size_t>(matrix.nrow());
763 size_t ngroups = output.mean.size();
764 size_t nblocks = tatami_stats::total_groups(block, NC);
765
766 auto combinations = internal::create_combinations(ngroups, group, block, NC);
767 auto combo_sizes = internal::tabulate_combinations<Index_>(ngroups, nblocks, combinations);
768 size_t ncombos = combo_sizes.size();
769 auto combo_weights = scran_blocks::compute_weights<Stat_>(combo_sizes, options.block_weight_policy, options.variable_block_weight_parameters);
770
771 size_t payload_size = ngenes * ncombos; // already cast to size_t to avoid overflow.
773
774 bool do_auc = !output.auc.empty();
775 std::vector<Stat_> tmp_auc;
776 Stat_* auc_ptr = NULL;
777 if (do_auc) {
778 tmp_auc.resize(ngroups * ngroups * ngenes);
779 auc_ptr = tmp_auc.data();
780 }
781
782 if (do_auc || matrix.prefer_rows()) {
783 internal::scan_matrix_by_row<false>(
784 matrix,
785 ngroups,
786 group,
787 nblocks,
788 block,
789 ncombos,
790 combinations.data(),
794 auc_ptr,
797 options.threshold,
798 options.num_threads
799 );
800
801 } else {
802 internal::scan_matrix_by_column(
803 matrix,
804 ncombos,
805 combinations.data(),
810 options.num_threads
811 );
812 }
813
814 internal::process_simple_summary_effects(
815 matrix.nrow(),
816 ngroups,
817 nblocks,
818 ncombos,
822 output,
824 options.threshold,
825 options.cache_size,
826 options.num_threads
827 );
828
829 if (do_auc) {
830 internal::summarize_comparisons(ngenes, ngroups, auc_ptr, output.auc, options.num_threads);
831 internal::compute_min_rank_pairwise(ngenes, ngroups, auc_ptr, output.auc, options.num_threads);
832 }
833}
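A corresponding sketch for the blocked overload (illustrative only; 'mat', 'group', 'ncells', 'opt' and 'buffers' are as in the earlier example), where the extra ingredient is a per-cell block assignment:

// Blocking on, e.g., batch of origin: effects are computed within each block
// and combined across blocks according to the weighting policy in 'opt'.
std::vector<int> block(ncells);
for (int c = 0; c < ncells; ++c) {
    block[c] = (c < ncells / 2 ? 0 : 1); // two batches for illustration.
}
scran_markers::score_markers_summary_blocked(mat, group.data(), block.data(), opt, buffers);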
834
851template<typename Stat_ = double, typename Rank_ = int, typename Value_, typename Index_, typename Group_>
852ScoreMarkersSummaryResults<Stat_, Rank_> score_markers_summary(
853 const tatami::Matrix<Value_, Index_>& matrix,
854 const Group_* group,
855 const ScoreMarkersSummaryOptions& options)
856{
857 size_t ngroups = tatami_stats::total_groups(group, matrix.ncol());
858 ScoreMarkersSummaryResults<Stat_, Rank_> output;
859 auto buffers = internal::fill_summary_results(matrix.nrow(), ngroups, output, options);
860 score_markers_summary(matrix, group, options, buffers);
861 return output;
862}
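A brief sketch of the allocating overload above (not part of the original file); it only touches members whose declarations are visible in ScoreMarkersSummaryResults earlier in this file:

#include "score_markers_summary.hpp"
#include "tatami/tatami.hpp"

// Returns the average (across genes) of the mean log-expression in group 0.
// Purely illustrative; real code would typically inspect the per-group summaries
// in res.cohens_d, res.auc, res.delta_mean and res.delta_detected instead.
template<typename Index_>
double average_group_zero_mean(const tatami::Matrix<double, Index_>& mat, const int* group) {
    scran_markers::ScoreMarkersSummaryOptions opt;
    auto res = scran_markers::score_markers_summary(mat, group, opt);

    double total = 0;
    for (auto m : res.mean[0]) {
        total += m;
    }
    return total / res.mean[0].size();
}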
863
883template<typename Stat_ = double, typename Rank_ = int, typename Value_, typename Index_, typename Group_, typename Block_>
884ScoreMarkersSummaryResults<Stat_, Rank_> score_markers_summary_blocked(
885 const tatami::Matrix<Value_, Index_>& matrix,
886 const Group_* group,
887 const Block_* block,
888 const ScoreMarkersSummaryOptions& options)
889{
890 size_t ngroups = tatami_stats::total_groups(group, matrix.ncol());
891 ScoreMarkersSummaryResults<Stat_, Rank_> output;
892 auto buffers = internal::fill_summary_results(matrix.nrow(), ngroups, output, options);
893 score_markers_summary_blocked(matrix, group, block, options, buffers);
894 return output;
895}
896
897}
898
899#endif