scran_variances
Model per-gene variance in expression
Loading...
Searching...
No Matches
model_gene_variances.hpp
Go to the documentation of this file.
1#ifndef SCRAN_MODEL_GENE_VARIANCES_H
2#define SCRAN_MODEL_GENE_VARIANCES_H
3
4#include <algorithm>
5#include <vector>
6#include <limits>
7#include <cstddef>
8
9#include "tatami/tatami.hpp"
10#include "tatami_stats/tatami_stats.hpp"
12#include "sanisizer/sanisizer.hpp"
13
15#include "utils.hpp"
16
22namespace scran_variances {
23
// Policy for collapsing per-block statistics into one averaged result:
// MEAN takes a (block-weighted) mean, QUANTILE takes a quantile across blocks
// (see ModelGeneVariancesOptions::block_quantile), and NONE skips the
// averaged outputs entirely.  Dispatched on in model_gene_variances_blocked().
31enum class BlockAveragePolicy : unsigned char { MEAN, QUANTILE, NONE };
32
41
    // How per-block statistics are combined into the averaged results;
    // see BlockAveragePolicy.
47 BlockAveragePolicy block_average_policy = BlockAveragePolicy::MEAN;
48
    // Weighting scheme for each block when averaging with MEAN; forwarded to
    // scran_blocks::compute_weights() in model_gene_variances_blocked().
59 scran_blocks::WeightPolicy block_weight_policy = scran_blocks::WeightPolicy::VARIABLE;
60
    // NOTE(review): a 'variable_block_weight_parameters' member (original
    // line 66) is hidden by this extraction; it is referenced below when
    // computing block weights — confirm against the full header.
67
71 // Back-compatibility only.
    // Legacy switch: when false, averaged results are skipped even if
    // block_average_policy != NONE (see the 'do_average' logic below).
72 bool compute_average = true;
    // Quantile across blocks, used only when block_average_policy == QUANTILE.
82 double block_quantile = 0.5;
83
    // Number of threads for the matrix passes and trend fits.
88 int num_threads = 1;
89};
90
// Output buffers for model_gene_variances(): each pointer should refer to an
// array with one entry per gene.  Per-block buffers are written
// unconditionally; only the 'average' buffers may be NULL (they are checked
// for NULL in model_gene_variances_blocked()).
99template<typename Stat_>
    // Per-gene mean of the expression values.
104 Stat_* means;
105
    // Per-gene variance of the expression values.
109 Stat_* variances;
110
    // Fitted value of the mean-variance trend for each gene
    // (filled by fit_variance_trend()).
114 Stat_* fitted;
115
    // Residual of each gene's variance from the fitted trend.
119 Stat_* residuals;
120};
121
// Owning counterpart of ModelGeneVariancesBuffers: one vector per statistic,
// each with one entry per gene.
126template<typename Stat_>
    // Default construction leaves all vectors empty.
131 ModelGeneVariancesResults() = default;
132
    // Allocate one entry per gene in each statistic vector.  sanisizer::cast
    // verifies that 'ngenes' fits in the vector's size type; the optional
    // SCRAN_VARIANCES_TEST_INIT macro supplies an extra constructor argument
    // (presumably a fill value for tests — confirm).
133 ModelGeneVariancesResults(const std::size_t ngenes) :
134 means(sanisizer::cast<I<decltype(means.size())> >(ngenes)
135#ifdef SCRAN_VARIANCES_TEST_INIT
136 , SCRAN_VARIANCES_TEST_INIT
137#endif
138 ),
139 variances(sanisizer::cast<I<decltype(variances.size())> >(ngenes)
140#ifdef SCRAN_VARIANCES_TEST_INIT
141 , SCRAN_VARIANCES_TEST_INIT
142#endif
143 ),
144 fitted(sanisizer::cast<I<decltype(fitted.size())> >(ngenes)
145#ifdef SCRAN_VARIANCES_TEST_INIT
146 , SCRAN_VARIANCES_TEST_INIT
147#endif
148 ),
149 residuals(sanisizer::cast<I<decltype(residuals.size())> >(ngenes)
150#ifdef SCRAN_VARIANCES_TEST_INIT
151 , SCRAN_VARIANCES_TEST_INIT
152#endif
153 )
154 {}
    // Per-gene means.
162 std::vector<Stat_> means;
163
    // Per-gene variances.
167 std::vector<Stat_> variances;
168
    // Per-gene fitted values of the mean-variance trend.
172 std::vector<Stat_> fitted;
173
    // Per-gene residuals from the trend.
177 std::vector<Stat_> residuals;
178};
179
// Output buffers for model_gene_variances_blocked(): one set of per-gene
// buffers for each block.
184template<typename Stat_>
    // One ModelGeneVariancesBuffers per block, in block order.
190 std::vector<ModelGeneVariancesBuffers<Stat_> > per_block;
191
    // NOTE(review): an 'average' member (ModelGeneVariancesBuffers, original
    // line 196) is hidden by this extraction; it is assigned in the wrapper
    // functions below — confirm against the full header.
197};
198
// Owning counterpart of ModelGeneVariancesBlockedBuffers: per-block results
// plus an (optional) averaged result.
203template<typename Stat_>
209
    // Allocate per-gene vectors for each of 'nblocks' blocks.  The averaged
    // result is only given a non-zero size when 'do_average' is true;
    // otherwise its vectors stay empty.
210 ModelGeneVariancesBlockedResults(const std::size_t ngenes, const std::size_t nblocks, const bool do_average) :
211 average(do_average ? ngenes : 0)
212 {
213 per_block.reserve(nblocks);
214 for (I<decltype(nblocks)> b = 0; b < nblocks; ++b) {
215 per_block.emplace_back(ngenes);
216 }
217 }
    // Results for each block, in block order.
225 std::vector<ModelGeneVariancesResults<Stat_> > per_block;
226
    // NOTE(review): the 'average' member declaration (original line 231) is
    // hidden by this extraction; it is initialized in the constructor above.
232};
233
237namespace internal {
238
// Dense row-major path: fetch each gene's row in full and compute its
// mean/variance directly, either per block (grouped) or across all columns.
// NOTE(review): the matrix parameter (original line 241) and the per-row
// fetch buffer declaration (line 255) are hidden by this extraction.
239template<typename Value_, typename Index_, typename Stat_, typename Block_>
240void compute_variances_dense_row(
242 const std::vector<ModelGeneVariancesBuffers<Stat_> >& buffers,
243 const Block_* const block,
244 const std::vector<Index_>& block_size,
245 const int num_threads)
246{
247 const bool blocked = (block != NULL);
248 const auto nblocks = block_size.size();
249 const auto NR = mat.nrow(), NC = mat.ncol();
250
    // Parallelize across genes (rows); each worker owns its scratch vectors,
    // which are only allocated when blocking is in effect.
251 tatami::parallelize([&](const int, const Index_ start, const Index_ length) -> void {
252 auto tmp_means = sanisizer::create<std::vector<Stat_> >(blocked ? nblocks : 0);
253 auto tmp_vars = sanisizer::create<std::vector<Stat_> >(blocked ? nblocks : 0);
254
256 auto ext = tatami::consecutive_extractor<false>(mat, true, start, length);
257 for (Index_ r = start, end = start + length; r < end; ++r) {
258 auto ptr = ext->fetch(buffer.data());
259
260 if (blocked) {
                // One mean/variance per block into the scratch arrays; the
                // trailing 'false' and NULL presumably skip NaN handling and
                // an optional count output — confirm against tatami_stats docs.
261 tatami_stats::grouped_variances::direct(
262 ptr,
263 NC,
264 block,
265 nblocks,
266 block_size.data(),
267 tmp_means.data(),
268 tmp_vars.data(),
269 false,
270 static_cast<Index_*>(NULL)
271 );
                // Scatter the per-block scratch values into the caller's buffers.
272 for (I<decltype(nblocks)> b = 0; b < nblocks; ++b) {
273 buffers[b].means[r] = tmp_means[b];
274 buffers[b].variances[r] = tmp_vars[b];
275 }
276 } else {
                // Unblocked: single (mean, variance) pair goes to buffers[0].
277 const auto stat = tatami_stats::variances::direct(ptr, NC, false);
278 buffers[0].means[r] = stat.first;
279 buffers[0].variances[r] = stat.second;
280 }
281 }
282 }, NR, num_threads);
283}
284
// Sparse row-major path: same as the dense row path, but fetches only the
// structural non-zeros of each row and uses the sparse variance kernels.
// NOTE(review): the matrix parameter (original line 287) and the vbuffer/
// ibuffer declarations (lines 302-303) are hidden by this extraction.
285template<typename Value_, typename Index_, typename Stat_, typename Block_>
286void compute_variances_sparse_row(
288 const std::vector<ModelGeneVariancesBuffers<Stat_> >& buffers,
289 const Block_* const block,
290 const std::vector<Index_>& block_size,
291 const int num_threads)
292{
293 const bool blocked = (block != NULL);
294 const auto nblocks = block_size.size();
295 const auto NR = mat.nrow(), NC = mat.ncol();
296
    // Parallelize across genes (rows); per-thread scratch for means,
    // variances and per-block non-zero counts.
297 tatami::parallelize([&](const int, const Index_ start, const Index_ length) -> void {
298 auto tmp_means = sanisizer::create<std::vector<Stat_> >(nblocks);
299 auto tmp_vars = sanisizer::create<std::vector<Stat_> >(nblocks);
300 auto tmp_nzero = sanisizer::create<std::vector<Index_> >(nblocks);
301
        // Index order within each row does not matter for these reductions,
        // so allow unordered sparse indices for speed.
304 auto ext = tatami::consecutive_extractor<true>(mat, true, start, length, [&]{
305 tatami::Options opt;
306 opt.sparse_ordered_index = false;
307 return opt;
308 }());
309
310 for (Index_ r = start, end = start + length; r < end; ++r) {
311 auto range = ext->fetch(vbuffer.data(), ibuffer.data());
312
313 if (blocked) {
                // Grouped sparse reduction; non-zero counts are needed to
                // account for the implicit zeros in each block.
314 tatami_stats::grouped_variances::direct(
315 range.value,
316 range.index,
317 range.number,
318 block,
319 nblocks,
320 block_size.data(),
321 tmp_means.data(),
322 tmp_vars.data(),
323 tmp_nzero.data(),
324 false,
325 static_cast<Index_*>(NULL)
326 );
327 for (I<decltype(nblocks)> b = 0; b < nblocks; ++b) {
328 buffers[b].means[r] = tmp_means[b];
329 buffers[b].variances[r] = tmp_vars[b];
330 }
331 } else {
                // Unblocked: NC supplies the total count including zeros.
332 const auto stat = tatami_stats::variances::direct(range.value, range.number, NC, false);
333 buffers[0].means[r] = stat.first;
334 buffers[0].variances[r] = stat.second;
335 }
336 }
337 }, NR, num_threads);
338}
339
// Dense column-major path: stream over columns with one running-variance
// accumulator per block, accumulating into thread-local output buffers that
// are transferred to the shared buffers at the end.
// NOTE(review): the matrix parameter (original line 342) and the per-column
// fetch buffer declaration (line 353) are hidden by this extraction.
340template<typename Value_, typename Index_, typename Stat_, typename Block_>
341void compute_variances_dense_column(
343 const std::vector<ModelGeneVariancesBuffers<Stat_> >& buffers,
344 const Block_* const block,
345 const std::vector<Index_>& block_size,
346 const int num_threads)
347{
348 const bool blocked = (block != NULL);
349 const auto nblocks = block_size.size();
350 const auto NR = mat.nrow(), NC = mat.ncol();
351
    // Parallelize across genes (rows): each thread processes the full set of
    // columns but only the [start, start+length) slice of each column.
352 tatami::parallelize([&](const int thread, const Index_ start, const Index_ length) -> void {
354 auto ext = tatami::consecutive_extractor<false>(mat, false, static_cast<Index_>(0), NC, start, length);
355
        // Thread-local staging for the per-block outputs, to avoid
        // false sharing on the caller's buffers during accumulation.
356 auto get_var = [&](Index_ b) -> Stat_* { return buffers[b].variances; };
357 tatami_stats::LocalOutputBuffers<Stat_, decltype(get_var)> local_vars(thread, nblocks, start, length, std::move(get_var));
358 auto get_mean = [&](Index_ b) -> Stat_* { return buffers[b].means; };
359 tatami_stats::LocalOutputBuffers<Stat_, decltype(get_mean)> local_means(thread, nblocks, start, length, std::move(get_mean));
360
        // One running accumulator per block, writing into the local staging.
361 std::vector<tatami_stats::variances::RunningDense<Stat_, Value_, Index_> > runners;
362 runners.reserve(nblocks);
363 for (I<decltype(nblocks)> b = 0; b < nblocks; ++b) {
364 runners.emplace_back(length, local_means.data(b), local_vars.data(b), false);
365 }
366
        // Feed each column to the accumulator of its block (or the sole
        // accumulator when unblocked).
367 if (blocked) {
368 for (I<decltype(NC)> c = 0; c < NC; ++c) {
369 auto ptr = ext->fetch(buffer.data());
370 runners[block[c]].add(ptr);
371 }
372 } else {
373 for (I<decltype(NC)> c = 0; c < NC; ++c) {
374 auto ptr = ext->fetch(buffer.data());
375 runners[0].add(ptr);
376 }
377 }
378
        // Finalize the running statistics, then copy the thread-local
        // results into the shared output buffers.
379 for (I<decltype(nblocks)> b = 0; b < nblocks; ++b) {
380 runners[b].finish();
381 }
382 local_vars.transfer();
383 local_means.transfer();
384 }, NR, num_threads);
385}
386
// Sparse column-major path: same strategy as the dense column path, but with
// sparse running accumulators fed by (value, index) pairs per column.
// NOTE(review): the matrix parameter (original line 389), an argument to the
// 'nonzeros' creation (line 400) and the vbuffer/ibuffer declarations
// (lines 404-405) are hidden by this extraction; 'nonzeros' has no visible
// use here — confirm against the full header.
387template<typename Value_, typename Index_, typename Stat_, typename Block_>
388void compute_variances_sparse_column(
390 const std::vector<ModelGeneVariancesBuffers<Stat_> >& buffers,
391 const Block_* const block,
392 const std::vector<Index_>& block_size,
393 const int num_threads)
394{
395 const bool blocked = (block != NULL);
396 const auto nblocks = block_size.size();
397 const auto NR = mat.nrow(), NC = mat.ncol();
398 auto nonzeros = sanisizer::create<std::vector<std::vector<Index_> > >(
399 nblocks,
401 );
402
403 tatami::parallelize([&](const int thread, const Index_ start, const Index_ length) -> void {
        // Unordered sparse indices are fine; RunningSparse was told the row
        // offset ('start') so it can translate indices itself.
406 auto ext = tatami::consecutive_extractor<true>(mat, false, static_cast<Index_>(0), NC, start, length, [&]{
407 tatami::Options opt;
408 opt.sparse_ordered_index = false;
409 return opt;
410 }());
411
        // Thread-local staging for per-block outputs (see dense column path).
412 auto get_var = [&](Index_ b) -> Stat_* { return buffers[b].variances; };
413 tatami_stats::LocalOutputBuffers<Stat_, decltype(get_var)> local_vars(thread, nblocks, start, length, std::move(get_var));
414 auto get_mean = [&](Index_ b) -> Stat_* { return buffers[b].means; };
415 tatami_stats::LocalOutputBuffers<Stat_, decltype(get_mean)> local_means(thread, nblocks, start, length, std::move(get_mean));
416
417 std::vector<tatami_stats::variances::RunningSparse<Stat_, Value_, Index_> > runners;
418 runners.reserve(nblocks);
419 for (I<decltype(nblocks)> b = 0; b < nblocks; ++b) {
420 runners.emplace_back(length, local_means.data(b), local_vars.data(b), false, start);
421 }
422
        // Feed each column's non-zero elements to its block's accumulator.
423 if (blocked) {
424 for (I<decltype(NC)> c = 0; c < NC; ++c) {
425 auto range = ext->fetch(vbuffer.data(), ibuffer.data());
426 runners[block[c]].add(range.value, range.index, range.number);
427 }
428 } else {
429 for (I<decltype(NC)> c = 0; c < NC; ++c) {
430 auto range = ext->fetch(vbuffer.data(), ibuffer.data());
431 runners[0].add(range.value, range.index, range.number);
432 }
433 }
434
        // Finalize and push thread-local results to the shared buffers.
435 for (I<decltype(nblocks)> b = 0; b < nblocks; ++b) {
436 runners[b].finish();
437 }
438 local_vars.transfer();
439 local_means.transfer();
440 }, NR, num_threads);
441}
442
// Dispatch to the most efficient traversal for this matrix: row- vs
// column-major according to mat.prefer_rows(), sparse vs dense according to
// mat.sparse().  All four paths produce identical means/variances.
// NOTE(review): the matrix parameter (original line 445) is hidden by this
// extraction.
443template<typename Value_, typename Index_, typename Stat_, typename Block_>
444void compute_variances(
446 const std::vector<ModelGeneVariancesBuffers<Stat_> >& buffers,
447 const Block_* const block,
448 const std::vector<Index_>& block_size,
449 const int num_threads)
450{
451 if (mat.prefer_rows()) {
452 if (mat.sparse()) {
453 compute_variances_sparse_row(mat, buffers, block, block_size, num_threads);
454 } else {
455 compute_variances_dense_row(mat, buffers, block, block_size, num_threads);
456 }
457 } else {
458 if (mat.sparse()) {
459 compute_variances_sparse_column(mat, buffers, block, block_size, num_threads);
460 } else {
461 compute_variances_dense_column(mat, buffers, block, block_size, num_threads);
462 }
463 }
464}
465
// Collect the weights of all blocks containing at least 'min_size' cells,
// preserving block order.  'tmp_weights' is cleared and refilled on every
// call so it can be reused across invocations.
template<typename Stat_, typename Index_>
void extract_weights(
    const std::vector<Stat_>& block_weights,
    const std::vector<Index_>& block_size,
    const Index_ min_size,
    std::vector<Stat_>& tmp_weights
) {
    tmp_weights.clear();
    const auto num_blocks = block_weights.size();
    for (decltype(block_weights.size()) b = 0; b < num_blocks; ++b) {
        // Blocks with too few cells contribute nothing to the average.
        if (block_size[b] >= min_size) {
            tmp_weights.push_back(block_weights[b]);
        }
    }
}
482
483template<typename Stat_, typename Index_, class Function_>
484void extract_pointers(
485 const std::vector<ModelGeneVariancesBuffers<Stat_> >& per_block,
486 const std::vector<Index_>& block_size,
487 const Index_ min_size,
488 const Function_ fun,
489 std::vector<Stat_*>& tmp_pointers
490) {
491 const auto nblocks = per_block.size();
492 tmp_pointers.clear();
493 for (I<decltype(nblocks)> b = 0; b < nblocks; ++b) {
494 if (block_size[b] < min_size) { // skip blocks with insufficient cells.
495 continue;
496 }
497 tmp_pointers.push_back(fun(per_block[b]));
498 }
499}
500
501}
// Main driver: compute per-block means/variances, fit a mean-variance trend
// within each block, then optionally combine the per-block statistics into
// averaged results according to options.block_average_policy.
// NOTE(review): the function name/matrix/buffers parameter lines (original
// lines 532-533, 535) and the trend-fit workspace declaration (line 550,
// used as 'work' below) are hidden by this extraction.
531template<typename Value_, typename Index_, typename Block_, typename Stat_>
534 const Block_* const block,
536 const ModelGeneVariancesOptions& options)
537{
538 const Index_ NR = mat.nrow(), NC = mat.ncol();
539 std::vector<Index_> block_size;
540
    // Tabulate block sizes and compute per-block raw statistics; a NULL
    // 'block' is treated as a single block covering all cells.
541 if (block) {
542 block_size = tatami_stats::tabulate_groups(block, NC);
543 internal::compute_variances(mat, buffers.per_block, block, block_size, options.num_threads);
544 } else {
545 block_size.push_back(NC); // everything is one big block.
546 internal::compute_variances(mat, buffers.per_block, block, block_size, options.num_threads);
547 }
548 const auto nblocks = block_size.size();
549
    // Fit the mean-variance trend separately within each block; a variance
    // needs at least two cells, so smaller blocks get NaN fits/residuals.
551 auto fopt = options.fit_variance_trend_options;
552 fopt.num_threads = options.num_threads;
553 for (I<decltype(nblocks)> b = 0; b < nblocks; ++b) {
554 const auto& current = buffers.per_block[b];
555 if (block_size[b] >= 2) {
556 fit_variance_trend(NR, current.means, current.variances, current.fitted, current.residuals, work, fopt);
557 } else {
558 std::fill_n(current.fitted, NR, std::numeric_limits<double>::quiet_NaN());
559 std::fill_n(current.residuals, NR, std::numeric_limits<double>::quiet_NaN());
560 }
561 }
562
    // Averaged outputs are optional: a NULL pointer means "don't compute".
563 const auto ave_means = buffers.average.means;
564 const auto ave_variances = buffers.average.variances;
565 const auto ave_fitted = buffers.average.fitted;
566 const auto ave_residuals = buffers.average.residuals;
567
568 std::vector<Stat_*> tmp_pointers;
569 tmp_pointers.reserve(nblocks);
570
571 if (options.block_average_policy == BlockAveragePolicy::MEAN) {
572 const auto block_weight = scran_blocks::compute_weights<Stat_>(block_size, options.block_weight_policy, options.variable_block_weight_parameters);
573 std::vector<Stat_> tmp_weights;
574 tmp_weights.reserve(nblocks);
575
        // Means only need one cell per block, hence min_size = 1 here.
576 if (ave_means) {
577 internal::extract_weights(block_weight, block_size, static_cast<Index_>(1), tmp_weights);
578 internal::extract_pointers(buffers.per_block, block_size, static_cast<Index_>(1), [](const auto& x) -> Stat_* { return x.means; }, tmp_pointers);
579 scran_blocks::parallel_weighted_means(NR, tmp_pointers, tmp_weights.data(), ave_means, /* skip_nan = */ false);
580 }
581
582 // Skip blocks without enough cells to compute the variance.
583 internal::extract_weights(block_weight, block_size, static_cast<Index_>(2), tmp_weights);
584
585 if (ave_variances) {
586 internal::extract_pointers(buffers.per_block, block_size, static_cast<Index_>(2), [](const auto& x) -> Stat_* { return x.variances; }, tmp_pointers);
587 scran_blocks::parallel_weighted_means(NR, tmp_pointers, tmp_weights.data(), ave_variances, /* skip_nan = */ false);
588 }
589
590 if (ave_fitted) {
591 internal::extract_pointers(buffers.per_block, block_size, static_cast<Index_>(2), [](const auto& x) -> Stat_* { return x.fitted; }, tmp_pointers);
592 scran_blocks::parallel_weighted_means(NR, tmp_pointers, tmp_weights.data(), ave_fitted, /* skip_nan = */ false);
593 }
594
595 if (ave_residuals) {
596 internal::extract_pointers(buffers.per_block, block_size, static_cast<Index_>(2), [](const auto& x) -> Stat_* { return x.residuals; }, tmp_pointers);
597 scran_blocks::parallel_weighted_means(NR, tmp_pointers, tmp_weights.data(), ave_residuals, /* skip_nan = */ false);
598 }
599
    // QUANTILE policy: same block-size filtering, but an unweighted quantile
    // across blocks instead of a weighted mean.
600 } else if (options.block_average_policy == BlockAveragePolicy::QUANTILE) {
601 if (ave_means) {
602 internal::extract_pointers(buffers.per_block, block_size, static_cast<Index_>(1), [](const auto& x) -> Stat_* { return x.means; }, tmp_pointers);
603 scran_blocks::parallel_quantiles(NR, tmp_pointers, options.block_quantile, ave_means, /* skip_nan = */ false);
604 }
605
606 // Skip blocks without enough cells to compute the variance.
607
608 if (ave_variances) {
609 internal::extract_pointers(buffers.per_block, block_size, static_cast<Index_>(2), [](const auto& x) -> Stat_* { return x.variances; }, tmp_pointers);
610 scran_blocks::parallel_quantiles(NR, tmp_pointers, options.block_quantile, ave_variances, /* skip_nan = */ false);
611 }
612
613 if (ave_fitted) {
614 internal::extract_pointers(buffers.per_block, block_size, static_cast<Index_>(2), [](const auto& x) -> Stat_* { return x.fitted; }, tmp_pointers);
615 scran_blocks::parallel_quantiles(NR, tmp_pointers, options.block_quantile, ave_fitted, /* skip_nan = */ false);
616 }
617
618 if (ave_residuals) {
619 internal::extract_pointers(buffers.per_block, block_size, static_cast<Index_>(2), [](const auto& x) -> Stat_* { return x.residuals; }, tmp_pointers);
620 scran_blocks::parallel_quantiles(NR, tmp_pointers, options.block_quantile, ave_residuals, /* skip_nan = */ false);
621 }
622 }
623}
624
// Unblocked convenience overload: wraps the supplied buffers in a
// single-block blocked structure (with the averages disabled) and delegates
// to model_gene_variances_blocked() with a NULL block assignment.
// NOTE(review): the function name/matrix parameter lines (original lines
// 643-644) and the 'bbuffers' declaration (line 648) are hidden by this
// extraction.
642template<typename Value_, typename Index_, typename Stat_>
645 ModelGeneVariancesBuffers<Stat_> buffers, // yes, the lack of a const ref here is deliberate, we need to move it into bbuffers anyway.
646 const ModelGeneVariancesOptions& options)
647{
649 bbuffers.per_block.emplace_back(std::move(buffers));
650
    // With a single block, there is nothing to average over.
651 bbuffers.average.means = NULL;
652 bbuffers.average.variances = NULL;
653 bbuffers.average.fitted = NULL;
654 bbuffers.average.residuals = NULL;
655
656 model_gene_variances_blocked(mat, static_cast<Index_*>(NULL), bbuffers, options);
657}
658
// Results-returning overload: allocates one entry per gene for each
// statistic, points a buffer set at the allocations, and delegates to the
// buffer-based overload above.
// NOTE(review): the function signature line (original line 673) and the
// 'buffers' declaration (line 676) are hidden by this extraction.
672template<typename Stat_ = double, typename Value_, typename Index_>
674 ModelGeneVariancesResults<Stat_> output(mat.nrow()); // cast is safe, as any tatami Index_ can always fit into a size_t.
675
677 buffers.means = output.means.data();
678 buffers.variances = output.variances.data();
679 buffers.fitted = output.fitted.data();
680 buffers.residuals = output.residuals.data();
681
682 model_gene_variances(mat, std::move(buffers), options);
683 return output;
684}
685
// Blocked results-returning overload: allocates per-block (and optionally
// averaged) per-gene vectors, wires up the buffer pointers, and delegates to
// the buffer-based model_gene_variances_blocked().
// NOTE(review): the function signature line (original line 704) and the
// 'buffers' declaration (line 710) are hidden by this extraction.
703template<typename Stat_ = double, typename Value_, typename Index_, typename Block_>
705 const auto nblocks = (block ? tatami_stats::total_groups(block, mat.ncol()) : 1);
706
    // Averages are produced only if both the legacy flag and the policy allow it.
707 const bool do_average = options.compute_average /* for back-compatibility */ && options.block_average_policy != BlockAveragePolicy::NONE;
708 ModelGeneVariancesBlockedResults<Stat_> output(mat.nrow(), nblocks, do_average); // cast is safe, any tatami Index_ can always fit into a size_t.
709
    // Point each per-block buffer set at the corresponding result vectors.
711 sanisizer::resize(buffers.per_block, nblocks);
712 for (I<decltype(nblocks)> b = 0; b < nblocks; ++b) {
713 auto& current = buffers.per_block[b];
714 current.means = output.per_block[b].means.data();
715 current.variances = output.per_block[b].variances.data();
716 current.fitted = output.per_block[b].fitted.data();
717 current.residuals = output.per_block[b].residuals.data();
718 }
719
    // NULL average pointers tell the driver to skip the averaging step.
720 if (!do_average) {
721 buffers.average.means = NULL;
722 buffers.average.variances = NULL;
723 buffers.average.fitted = NULL;
724 buffers.average.residuals = NULL;
725 } else {
726 buffers.average.means = output.average.means.data();
727 buffers.average.variances = output.average.variances.data();
728 buffers.average.fitted = output.average.fitted.data();
729 buffers.average.residuals = output.average.residuals.data();
730 }
731
732 model_gene_variances_blocked(mat, block, buffers, options);
733 return output;
734}
735
736}
737
738#endif
virtual Index_ ncol() const=0
virtual Index_ nrow() const=0
virtual bool prefer_rows() const=0
virtual std::unique_ptr< MyopicSparseExtractor< Value_, Index_ > > sparse(bool row, const Options &opt) const=0
Fit a mean-variance trend to log-count data.
void compute_weights(const std::size_t num_blocks, const Size_ *const sizes, const WeightPolicy policy, const VariableWeightParameters &variable, Weight_ *const weights)
void parallel_weighted_means(const std::size_t n, std::vector< Stat_ * > in, const Weight_ *const w, Output_ *const out, const bool skip_nan)
void parallel_quantiles(const std::size_t n, const std::vector< Stat_ * > &in, const double quantile, Output_ *const out, const bool skip_nan)
Variance modelling for single-cell expression data.
Definition choose_highly_variable_genes.hpp:15
void model_gene_variances_blocked(const tatami::Matrix< Value_, Index_ > &mat, const Block_ *const block, const ModelGeneVariancesBlockedBuffers< Stat_ > &buffers, const ModelGeneVariancesOptions &options)
Definition model_gene_variances.hpp:532
void fit_variance_trend(const std::size_t n, const Float_ *const mean, const Float_ *const variance, Float_ *const fitted, Float_ *const residuals, FitVarianceTrendWorkspace< Float_ > &workspace, const FitVarianceTrendOptions &options)
Definition fit_variance_trend.hpp:131
void model_gene_variances(const tatami::Matrix< Value_, Index_ > &mat, ModelGeneVariancesBuffers< Stat_ > buffers, const ModelGeneVariancesOptions &options)
Definition model_gene_variances.hpp:643
BlockAveragePolicy
Definition model_gene_variances.hpp:31
void parallelize(Function_ fun, const Index_ tasks, const int threads)
Container_ create_container_of_Index_size(const Index_ x, Args_ &&... args)
auto consecutive_extractor(const Matrix< Value_, Index_ > &matrix, const bool row, const Index_ iter_start, const Index_ iter_length, Args_ &&... args)
Options for fit_variance_trend().
Definition fit_variance_trend.hpp:24
int num_threads
Definition fit_variance_trend.hpp:82
Workspace for fit_variance_trend().
Definition fit_variance_trend.hpp:91
Buffers for model_gene_variances_blocked().
Definition model_gene_variances.hpp:185
ModelGeneVariancesBuffers< Stat_ > average
Definition model_gene_variances.hpp:196
std::vector< ModelGeneVariancesBuffers< Stat_ > > per_block
Definition model_gene_variances.hpp:190
Results of model_gene_variances_blocked().
Definition model_gene_variances.hpp:204
std::vector< ModelGeneVariancesResults< Stat_ > > per_block
Definition model_gene_variances.hpp:225
ModelGeneVariancesResults< Stat_ > average
Definition model_gene_variances.hpp:231
Buffers for model_gene_variances() and friends.
Definition model_gene_variances.hpp:100
Stat_ * means
Definition model_gene_variances.hpp:104
Stat_ * residuals
Definition model_gene_variances.hpp:119
Stat_ * variances
Definition model_gene_variances.hpp:109
Stat_ * fitted
Definition model_gene_variances.hpp:114
Options for model_gene_variances() and friends.
Definition model_gene_variances.hpp:36
FitVarianceTrendOptions fit_variance_trend_options
Definition model_gene_variances.hpp:40
double block_quantile
Definition model_gene_variances.hpp:82
BlockAveragePolicy block_average_policy
Definition model_gene_variances.hpp:47
scran_blocks::VariableWeightParameters variable_block_weight_parameters
Definition model_gene_variances.hpp:66
int num_threads
Definition model_gene_variances.hpp:88
scran_blocks::WeightPolicy block_weight_policy
Definition model_gene_variances.hpp:59
Results of model_gene_variances().
Definition model_gene_variances.hpp:127
std::vector< Stat_ > fitted
Definition model_gene_variances.hpp:172
std::vector< Stat_ > means
Definition model_gene_variances.hpp:162
std::vector< Stat_ > residuals
Definition model_gene_variances.hpp:177
std::vector< Stat_ > variances
Definition model_gene_variances.hpp:167
bool sparse_ordered_index