scran_variances
Model per-gene variance in expression
Source listing of model_gene_variances.hpp (exported from the generated documentation for this file).
1#ifndef SCRAN_MODEL_GENE_VARIANCES_H
2#define SCRAN_MODEL_GENE_VARIANCES_H
3
4#include <algorithm>
5#include <vector>
6#include <limits>
7#include <cstddef>
8
9#include "tatami/tatami.hpp"
10#include "tatami_stats/tatami_stats.hpp"
12#include "sanisizer/sanisizer.hpp"
13
15#include "utils.hpp"
16
22namespace scran_variances {
23
/**
 * Policy for combining per-block statistics into a single cross-block "average":
 * a (weighted) mean across blocks, a quantile across blocks, or no averaging at all.
 * Kept as `unsigned char` to minimize storage in option structs.
 */
enum class BlockAveragePolicy : unsigned char {
    MEAN,     // weighted mean across blocks (weights from the block weight policy).
    QUANTILE, // per-gene quantile across blocks (see block_quantile option).
    NONE      // do not compute any cross-block average.
};
32
// NOTE(review): Doxygen source listing — the numeric prefixes are listing line
// numbers and the original documentation comment blocks were stripped on export.
// Whether to fit a mean-variance trend and fill the fitted/residual outputs.
42 bool trend = true;
43

// The fit_variance_trend_options field was declared in the stripped lines here
// (see the cross-reference index: FitVarianceTrendOptions at listing line 48).
49

// How per-block statistics are combined into the "average" outputs.
55 BlockAveragePolicy block_average_policy = BlockAveragePolicy::MEAN;
56

// Weighting scheme for blocks under the MEAN policy; forwarded to
// scran_blocks::compute_weights().
67 scran_blocks::WeightPolicy block_weight_policy = scran_blocks::WeightPolicy::VARIABLE;
68

// variable_block_weight_parameters (declared in stripped listing line 74)
// tunes the VARIABLE weight policy.
75

// Deprecated on/off switch; averaging also requires block_average_policy != NONE.
79 // Back-compatibility only.
80 bool compute_average = true;
// Quantile used per gene under the QUANTILE policy.
90 double block_quantile = 0.5;
91

// Number of threads for all parallel sections (also propagated to the trend fit).
96 int num_threads = 1;
97};
98
// Pointers to caller-allocated output arrays, one value per gene.
// NOTE(review): the struct declaration line was stripped by the documentation export.
107template<typename Stat_>
// Per-gene means.
112 Stat_* means;
113

// Per-gene variances.
117 Stat_* variances;
118

// Fitted values of the mean-variance trend; may be NULL to skip trend outputs.
124 Stat_* fitted;
125

// Residuals from the trend fit; may be NULL to skip trend outputs.
131 Stat_* residuals;
132};
133
// Owning results container for model_gene_variances(); vectors hold one value per gene.
// NOTE(review): the struct declaration line was stripped by the documentation export.
138template<typename Stat_>
// Default construction leaves all vectors empty.
143 ModelGeneVariancesResults() = default;
144

// Allocate per-gene vectors; fitted/residuals are only sized when 'trend' is true.
// sanisizer::cast<> checks that 'ngenes' fits in each vector's size type, and
// SCRAN_VARIANCES_TEST_INIT (test builds only) injects a fill value so that
// uninitialized reads are detectable.
145 ModelGeneVariancesResults(const std::size_t ngenes, const bool trend) :
146 means(sanisizer::cast<I<decltype(means.size())> >(ngenes)
147#ifdef SCRAN_VARIANCES_TEST_INIT
148 , SCRAN_VARIANCES_TEST_INIT
149#endif
150 ),
151 variances(sanisizer::cast<I<decltype(variances.size())> >(ngenes)
152#ifdef SCRAN_VARIANCES_TEST_INIT
153 , SCRAN_VARIANCES_TEST_INIT
154#endif
155 ),
156 fitted(sanisizer::cast<I<decltype(fitted.size())> >(trend ? ngenes : 0)
157#ifdef SCRAN_VARIANCES_TEST_INIT
158 , SCRAN_VARIANCES_TEST_INIT
159#endif
160 ),
161 residuals(sanisizer::cast<I<decltype(residuals.size())> >(trend ? ngenes : 0)
162#ifdef SCRAN_VARIANCES_TEST_INIT
163 , SCRAN_VARIANCES_TEST_INIT
164#endif
165 )
166 {}
// Per-gene means.
174 std::vector<Stat_> means;
175

// Per-gene variances.
179 std::vector<Stat_> variances;
180

// Fitted trend values (empty when no trend was requested).
186 std::vector<Stat_> fitted;
187

// Trend residuals (empty when no trend was requested).
193 std::vector<Stat_> residuals;
194};
195
// Caller-allocated buffers for the blocked analysis: one set of per-gene
// pointers per block, plus an 'average' member for cross-block averages
// (declared in a line stripped by the documentation export; see index line 217).
200template<typename Stat_>
// Per-block output pointers, in block order.
206 std::vector<ModelGeneVariancesBuffers<Stat_> > per_block;
207

218};
219
// Owning results for the blocked analysis: one ModelGeneVariancesResults per
// block plus an 'average' member (declaration stripped by the export; see
// index line 252).
224template<typename Stat_>
230

// Allocate per-gene vectors for each of 'nblocks' blocks; the average vectors
// are only sized when 'do_average' is true, and trend vectors when 'do_trend'.
231 ModelGeneVariancesBlockedResults(const std::size_t ngenes, const std::size_t nblocks, const bool do_average, const bool do_trend) :
232 average(do_average ? ngenes : 0, do_trend)
233 {
234 per_block.reserve(nblocks);
235 for (I<decltype(nblocks)> b = 0; b < nblocks; ++b) {
236 per_block.emplace_back(ngenes, do_trend);
237 }
238 }
// Per-block results, in block order.
246 std::vector<ModelGeneVariancesResults<Stat_> > per_block;
247

253};
254
258namespace internal {
259
// [dense, row-major path] Compute each gene's mean/variance by fetching the
// full row; with a blocking factor, statistics are computed per block.
// NOTE(review): the export dropped the tatami::Matrix 'mat' parameter
// (listing line 262) and the dense row buffer 'buffer' (listing line 276).
260template<typename Value_, typename Index_, typename Stat_, typename Block_>
261void compute_variances_dense_row(
263 const std::vector<ModelGeneVariancesBuffers<Stat_> >& buffers,
264 const Block_* const block,
265 const std::vector<Index_>& block_size,
266 const int num_threads)
267{
// A null 'block' means all columns (cells) form one big block.
268 const bool blocked = (block != NULL);
269 const auto nblocks = block_size.size();
270 const auto NR = mat.nrow(), NC = mat.ncol();
271
// Parallelize over consecutive ranges of rows (genes).
272 tatami::parallelize([&](const int, const Index_ start, const Index_ length) -> void {
// Per-row scratch space for the per-block results; empty when unblocked.
273 auto tmp_means = sanisizer::create<std::vector<Stat_> >(blocked ? nblocks : 0);
274 auto tmp_vars = sanisizer::create<std::vector<Stat_> >(blocked ? nblocks : 0);
275
277 auto ext = tatami::consecutive_extractor<false>(mat, true, start, length);
278 for (Index_ r = start, end = start + length; r < end; ++r) {
279 auto ptr = ext->fetch(buffer.data());
280
281 if (blocked) {
// One-pass grouped mean/variance for this row; trailing arguments
// follow the tatami_stats::grouped_variances::direct() signature
// (TODO confirm semantics of the 'false'/NULL arguments against its docs).
282 tatami_stats::grouped_variances::direct(
283 ptr,
284 NC,
285 block,
286 nblocks,
287 block_size.data(),
288 tmp_means.data(),
289 tmp_vars.data(),
290 false,
291 static_cast<Index_*>(NULL)
292 );
// Scatter the per-block results into the caller-provided buffers.
293 for (I<decltype(nblocks)> b = 0; b < nblocks; ++b) {
294 buffers[b].means[r] = tmp_means[b];
295 buffers[b].variances[r] = tmp_vars[b];
296 }
297 } else {
// Unblocked: single mean/variance straight into the first buffer set.
298 const auto stat = tatami_stats::variances::direct(ptr, NC, false);
299 buffers[0].means[r] = stat.first;
300 buffers[0].variances[r] = stat.second;
301 }
302 }
303 }, NR, num_threads);
304}
305
// [sparse, row-major path] As the dense row path, but fetching only the
// structural non-zeros of each row.
// NOTE(review): the export dropped the tatami::Matrix 'mat' parameter
// (listing line 308) and the 'vbuffer'/'ibuffer' declarations (lines 323-324).
306template<typename Value_, typename Index_, typename Stat_, typename Block_>
307void compute_variances_sparse_row(
309 const std::vector<ModelGeneVariancesBuffers<Stat_> >& buffers,
310 const Block_* const block,
311 const std::vector<Index_>& block_size,
312 const int num_threads)
313{
314 const bool blocked = (block != NULL);
315 const auto nblocks = block_size.size();
316 const auto NR = mat.nrow(), NC = mat.ncol();
317
318 tatami::parallelize([&](const int, const Index_ start, const Index_ length) -> void {
// Per-row scratch: per-block means, variances and non-zero counts.
319 auto tmp_means = sanisizer::create<std::vector<Stat_> >(nblocks);
320 auto tmp_vars = sanisizer::create<std::vector<Stat_> >(nblocks);
321 auto tmp_nzero = sanisizer::create<std::vector<Index_> >(nblocks);
322
// Indices need not be sorted for the grouped computation, so skip ordering.
325 auto ext = tatami::consecutive_extractor<true>(mat, true, start, length, [&]{
326 tatami::Options opt;
327 opt.sparse_ordered_index = false;
328 return opt;
329 }());
330
331 for (Index_ r = start, end = start + length; r < end; ++r) {
332 auto range = ext->fetch(vbuffer.data(), ibuffer.data());
333
334 if (blocked) {
// Grouped sparse mean/variance; non-zero counts let it account for
// the implicit zeros in each block.
335 tatami_stats::grouped_variances::direct(
336 range.value,
337 range.index,
338 range.number,
339 block,
340 nblocks,
341 block_size.data(),
342 tmp_means.data(),
343 tmp_vars.data(),
344 tmp_nzero.data(),
345 false,
346 static_cast<Index_*>(NULL)
347 );
348 for (I<decltype(nblocks)> b = 0; b < nblocks; ++b) {
349 buffers[b].means[r] = tmp_means[b];
350 buffers[b].variances[r] = tmp_vars[b];
351 }
352 } else {
// Unblocked: NC is passed so the implicit zeros are included.
353 const auto stat = tatami_stats::variances::direct(range.value, range.number, NC, false);
354 buffers[0].means[r] = stat.first;
355 buffers[0].variances[r] = stat.second;
356 }
357 }
358 }, NR, num_threads);
359}
360
// [dense, column-major path] Stream over columns (cells) and accumulate
// running per-gene means/variances, one running accumulator per block.
// NOTE(review): the export dropped the tatami::Matrix 'mat' parameter
// (listing line 363) and the dense column buffer 'buffer' (listing line 374).
361template<typename Value_, typename Index_, typename Stat_, typename Block_>
362void compute_variances_dense_column(
364 const std::vector<ModelGeneVariancesBuffers<Stat_> >& buffers,
365 const Block_* const block,
366 const std::vector<Index_>& block_size,
367 const int num_threads)
368{
369 const bool blocked = (block != NULL);
370 const auto nblocks = block_size.size();
371 const auto NR = mat.nrow(), NC = mat.ncol();
372
// Each thread handles a contiguous slab of rows across all columns.
373 tatami::parallelize([&](const int thread, const Index_ start, const Index_ length) -> void {
375 auto ext = tatami::consecutive_extractor<false>(mat, false, static_cast<Index_>(0), NC, start, length);
376
// Stage per-thread output in local buffers to avoid false sharing;
// results are copied into the caller's buffers by transfer() below.
377 auto get_var = [&](Index_ b) -> Stat_* { return buffers[b].variances; };
378 tatami_stats::LocalOutputBuffers<Stat_, decltype(get_var)> local_vars(thread, nblocks, start, length, std::move(get_var));
379 auto get_mean = [&](Index_ b) -> Stat_* { return buffers[b].means; };
380 tatami_stats::LocalOutputBuffers<Stat_, decltype(get_mean)> local_means(thread, nblocks, start, length, std::move(get_mean));
381
// One running-statistics accumulator per block.
382 std::vector<tatami_stats::variances::RunningDense<Stat_, Value_, Index_> > runners;
383 runners.reserve(nblocks);
384 for (I<decltype(nblocks)> b = 0; b < nblocks; ++b) {
385 runners.emplace_back(length, local_means.data(b), local_vars.data(b), false);
386 }
387
388 if (blocked) {
// Route each column's values to its block's accumulator.
389 for (I<decltype(NC)> c = 0; c < NC; ++c) {
390 auto ptr = ext->fetch(buffer.data());
391 runners[block[c]].add(ptr);
392 }
393 } else {
394 for (I<decltype(NC)> c = 0; c < NC; ++c) {
395 auto ptr = ext->fetch(buffer.data());
396 runners[0].add(ptr);
397 }
398 }
399
// Finalize the running statistics, then flush thread-local results.
400 for (I<decltype(nblocks)> b = 0; b < nblocks; ++b) {
401 runners[b].finish();
402 }
403 local_vars.transfer();
404 local_means.transfer();
405 }, NR, num_threads);
406}
407
// [sparse, column-major path] As the dense column path, but with sparse
// running accumulators that only see the structural non-zeros.
// NOTE(review): the export dropped the tatami::Matrix 'mat' parameter
// (listing line 410), part of the 'nonzeros' initializer (line 421) and the
// 'vbuffer'/'ibuffer' declarations (lines 425-426).
408template<typename Value_, typename Index_, typename Stat_, typename Block_>
409void compute_variances_sparse_column(
411 const std::vector<ModelGeneVariancesBuffers<Stat_> >& buffers,
412 const Block_* const block,
413 const std::vector<Index_>& block_size,
414 const int num_threads)
415{
416 const bool blocked = (block != NULL);
417 const auto nblocks = block_size.size();
418 const auto NR = mat.nrow(), NC = mat.ncol();
// Per-block non-zero counts (inner dimension stripped by the export).
419 auto nonzeros = sanisizer::create<std::vector<std::vector<Index_> > >(
420 nblocks,
422 );
423
424 tatami::parallelize([&](const int thread, const Index_ start, const Index_ length) -> void {
// Unordered indices are fine for running accumulation, so skip sorting.
427 auto ext = tatami::consecutive_extractor<true>(mat, false, static_cast<Index_>(0), NC, start, length, [&]{
428 tatami::Options opt;
429 opt.sparse_ordered_index = false;
430 return opt;
431 }());
432
// Thread-local staging of outputs, flushed via transfer() at the end.
433 auto get_var = [&](Index_ b) -> Stat_* { return buffers[b].variances; };
434 tatami_stats::LocalOutputBuffers<Stat_, decltype(get_var)> local_vars(thread, nblocks, start, length, std::move(get_var));
435 auto get_mean = [&](Index_ b) -> Stat_* { return buffers[b].means; };
436 tatami_stats::LocalOutputBuffers<Stat_, decltype(get_mean)> local_means(thread, nblocks, start, length, std::move(get_mean));
437
// One sparse running accumulator per block; 'start' is the row offset
// of this thread's slab.
438 std::vector<tatami_stats::variances::RunningSparse<Stat_, Value_, Index_> > runners;
439 runners.reserve(nblocks);
440 for (I<decltype(nblocks)> b = 0; b < nblocks; ++b) {
441 runners.emplace_back(length, local_means.data(b), local_vars.data(b), false, start);
442 }
443
444 if (blocked) {
445 for (I<decltype(NC)> c = 0; c < NC; ++c) {
446 auto range = ext->fetch(vbuffer.data(), ibuffer.data());
447 runners[block[c]].add(range.value, range.index, range.number);
448 }
449 } else {
450 for (I<decltype(NC)> c = 0; c < NC; ++c) {
451 auto range = ext->fetch(vbuffer.data(), ibuffer.data());
452 runners[0].add(range.value, range.index, range.number);
453 }
454 }
455
456 for (I<decltype(nblocks)> b = 0; b < nblocks; ++b) {
457 runners[b].finish();
458 }
459 local_vars.transfer();
460 local_means.transfer();
461 }, NR, num_threads);
462}
463
// Dispatch to the most efficient traversal based on the matrix's preferred
// access dimension and its sparsity.
// NOTE(review): the tatami::Matrix 'mat' parameter (listing line 466) was
// dropped by the documentation export.
464template<typename Value_, typename Index_, typename Stat_, typename Block_>
465void compute_variances(
467 const std::vector<ModelGeneVariancesBuffers<Stat_> >& buffers,
468 const Block_* const block,
469 const std::vector<Index_>& block_size,
470 const int num_threads)
471{
472 if (mat.prefer_rows()) {
473 if (mat.sparse()) {
474 compute_variances_sparse_row(mat, buffers, block, block_size, num_threads);
475 } else {
476 compute_variances_dense_row(mat, buffers, block, block_size, num_threads);
477 }
478 } else {
479 if (mat.sparse()) {
480 compute_variances_sparse_column(mat, buffers, block, block_size, num_threads);
481 } else {
482 compute_variances_dense_column(mat, buffers, block, block_size, num_threads);
483 }
484 }
485}
486
// Collect the weights of every block containing at least `min_size` cells,
// in block order, into `tmp_weights` (cleared first). The filter must mirror
// extract_pointers() so that weights stay aligned with the pointer list.
template<typename Stat_, typename Index_>
void extract_weights(
    const std::vector<Stat_>& block_weights,
    const std::vector<Index_>& block_size,
    const Index_ min_size,
    std::vector<Stat_>& tmp_weights
) {
    tmp_weights.clear();
    const auto num_blocks = block_weights.size();
    for (decltype(block_weights.size()) b = 0; b < num_blocks; ++b) {
        // Only blocks with enough cells contribute to the average.
        if (block_size[b] >= min_size) {
            tmp_weights.push_back(block_weights[b]);
        }
    }
}
503
504template<typename Stat_, typename Index_, class Function_>
505void extract_pointers(
506 const std::vector<ModelGeneVariancesBuffers<Stat_> >& per_block,
507 const std::vector<Index_>& block_size,
508 const Index_ min_size,
509 const Function_ fun,
510 std::vector<Stat_*>& tmp_pointers
511) {
512 const auto nblocks = per_block.size();
513 tmp_pointers.clear();
514 for (I<decltype(nblocks)> b = 0; b < nblocks; ++b) {
515 if (block_size[b] < min_size) { // skip blocks with insufficient cells.
516 continue;
517 }
518 tmp_pointers.push_back(fun(per_block[b]));
519 }
520}
521
522}
// Blocked variance modelling: compute per-block per-gene means/variances,
// optionally fit a mean-variance trend within each block, then combine the
// per-block statistics into cross-block averages per the chosen policy.
// NOTE(review): the export dropped the 'mat'/'buffers' parameter lines
// (listing lines 553-556) and the trend-fit workspace 'work' (line 571).
552template<typename Value_, typename Index_, typename Block_, typename Stat_>
555 const Block_* const block,
557 const ModelGeneVariancesOptions& options
558) {
559 const Index_ NR = mat.nrow(), NC = mat.ncol();
560 std::vector<Index_> block_size;
561
// Raw statistics: tabulate block sizes if a blocking factor was given,
// otherwise treat the entire matrix as a single block.
562 if (block) {
563 block_size = tatami_stats::tabulate_groups(block, NC);
564 internal::compute_variances(mat, buffers.per_block, block, block_size, options.num_threads);
565 } else {
566 block_size.push_back(NC); // everything is one big block.
567 internal::compute_variances(mat, buffers.per_block, block, block_size, options.num_threads);
568 }
569 const auto nblocks = block_size.size();
570
// Per-block trend fitting, propagating the caller's thread count.
572 auto fopt = options.fit_variance_trend_options;
573 fopt.num_threads = options.num_threads;
574 bool all_trends_fitted = true;
575
576 for (I<decltype(nblocks)> b = 0; b < nblocks; ++b) {
577 const auto& current = buffers.per_block[b];
// NULL fitted/residual pointers mean the caller skipped trend output
// for this block; remember this so averaging can fail loudly below.
578 if (current.fitted == NULL || current.residuals == NULL) {
579 all_trends_fitted = false;
580 continue;
581 }
// A variance needs at least two cells; otherwise the fit is undefined.
582 if (block_size[b] >= 2) {
583 fit_variance_trend(NR, current.means, current.variances, current.fitted, current.residuals, work, fopt);
584 } else {
585 std::fill_n(current.fitted, NR, std::numeric_limits<double>::quiet_NaN());
586 std::fill_n(current.residuals, NR, std::numeric_limits<double>::quiet_NaN());
587 }
588 }
589
590 const auto ave_means = buffers.average.means;
591 const auto ave_variances = buffers.average.variances;
592 const auto ave_fitted = buffers.average.fitted;
593 const auto ave_residuals = buffers.average.residuals;
594
// Averaged trend outputs require every block to have produced a fit.
595 if ((ave_fitted || ave_residuals) && !all_trends_fitted) {
596 throw std::runtime_error("cannot compute average fitted values/residuals without per-block trend fits");
597 }
598
599 std::vector<Stat_*> tmp_pointers;
600 tmp_pointers.reserve(nblocks);
601
// Weighted-mean averaging: blocks below the minimum size are dropped from
// both the weight and pointer lists (min 1 cell for means, 2 for variances).
602 if (options.block_average_policy == BlockAveragePolicy::MEAN) {
603 const auto block_weight = scran_blocks::compute_weights<Stat_>(block_size, options.block_weight_policy, options.variable_block_weight_parameters);
604 std::vector<Stat_> tmp_weights;
605 tmp_weights.reserve(nblocks);
606
607 if (ave_means) {
608 internal::extract_weights(block_weight, block_size, static_cast<Index_>(1), tmp_weights);
609 internal::extract_pointers(buffers.per_block, block_size, static_cast<Index_>(1), [](const auto& x) -> Stat_* { return x.means; }, tmp_pointers);
610 scran_blocks::parallel_weighted_means(NR, tmp_pointers, tmp_weights.data(), ave_means, /* skip_nan = */ false);
611 }
612
613 // Skip blocks without enough cells to compute the variance.
614 internal::extract_weights(block_weight, block_size, static_cast<Index_>(2), tmp_weights);
615
616 if (ave_variances) {
617 internal::extract_pointers(buffers.per_block, block_size, static_cast<Index_>(2), [](const auto& x) -> Stat_* { return x.variances; }, tmp_pointers);
618 scran_blocks::parallel_weighted_means(NR, tmp_pointers, tmp_weights.data(), ave_variances, /* skip_nan = */ false);
619 }
620
621 if (ave_fitted) {
622 internal::extract_pointers(buffers.per_block, block_size, static_cast<Index_>(2), [](const auto& x) -> Stat_* { return x.fitted; }, tmp_pointers);
623 scran_blocks::parallel_weighted_means(NR, tmp_pointers, tmp_weights.data(), ave_fitted, /* skip_nan = */ false);
624 }
625
626 if (ave_residuals) {
627 internal::extract_pointers(buffers.per_block, block_size, static_cast<Index_>(2), [](const auto& x) -> Stat_* { return x.residuals; }, tmp_pointers);
628 scran_blocks::parallel_weighted_means(NR, tmp_pointers, tmp_weights.data(), ave_residuals, /* skip_nan = */ false);
629 }
630
// Quantile averaging: same minimum-size filters, but a per-gene quantile
// across blocks instead of a weighted mean.
631 } else if (options.block_average_policy == BlockAveragePolicy::QUANTILE) {
632 if (ave_means) {
633 internal::extract_pointers(buffers.per_block, block_size, static_cast<Index_>(1), [](const auto& x) -> Stat_* { return x.means; }, tmp_pointers);
634 scran_blocks::parallel_quantiles(NR, tmp_pointers, options.block_quantile, ave_means, /* skip_nan = */ false);
635 }
636
637 // Skip blocks without enough cells to compute the variance.
638
639 if (ave_variances) {
640 internal::extract_pointers(buffers.per_block, block_size, static_cast<Index_>(2), [](const auto& x) -> Stat_* { return x.variances; }, tmp_pointers);
641 scran_blocks::parallel_quantiles(NR, tmp_pointers, options.block_quantile, ave_variances, /* skip_nan = */ false);
642 }
643
644 if (ave_fitted) {
645 internal::extract_pointers(buffers.per_block, block_size, static_cast<Index_>(2), [](const auto& x) -> Stat_* { return x.fitted; }, tmp_pointers);
646 scran_blocks::parallel_quantiles(NR, tmp_pointers, options.block_quantile, ave_fitted, /* skip_nan = */ false);
647 }
648
649 if (ave_residuals) {
650 internal::extract_pointers(buffers.per_block, block_size, static_cast<Index_>(2), [](const auto& x) -> Stat_* { return x.residuals; }, tmp_pointers);
651 scran_blocks::parallel_quantiles(NR, tmp_pointers, options.block_quantile, ave_residuals, /* skip_nan = */ false);
652 }
653 }
654}
655
// Unblocked overload: wrap the supplied buffers as a single "block" and run
// the blocked analysis with a null blocking factor and no averaging outputs.
// NOTE(review): the export dropped the signature lines with 'mat' (674-675)
// and the 'bbuffers' declaration (line 679).
673template<typename Value_, typename Index_, typename Stat_>
676 ModelGeneVariancesBuffers<Stat_> buffers, // yes, the lack of a const ref here is deliberate, we need to move it into bbuffers anyway.
677 const ModelGeneVariancesOptions& options)
678{
680 bbuffers.per_block.emplace_back(std::move(buffers));
681
// With a single block there is nothing to average.
682 bbuffers.average.means = NULL;
683 bbuffers.average.variances = NULL;
684 bbuffers.average.fitted = NULL;
685 bbuffers.average.residuals = NULL;
686
// A null block pointer puts every cell into one block.
687 model_gene_variances_blocked(mat, static_cast<Index_*>(NULL), bbuffers, options);
688}
689
// Results-returning overload: allocate owning vectors, point a buffer struct
// at them, and delegate to the buffer-based overload.
// NOTE(review): the export dropped the signature line (704) and the
// 'buffers' declaration (line 707).
703template<typename Stat_ = double, typename Value_, typename Index_>
705 ModelGeneVariancesResults<Stat_> output(mat.nrow(), options.trend); // cast is safe, as any tatami Index_ can always fit into a size_t.
706
708 buffers.means = output.means.data();
709 buffers.variances = output.variances.data();
710
// Trend outputs are only wired up when a trend fit was requested.
711 if (options.trend) {
712 buffers.fitted = output.fitted.data();
713 buffers.residuals = output.residuals.data();
714 } else {
715 buffers.fitted = NULL;
716 buffers.residuals = NULL;
717 }
718
719 model_gene_variances(mat, std::move(buffers), options);
720 return output;
721}
722
// Blocked results-returning overload: allocate per-block (and optional
// average) vectors, wire up the buffer structs, and delegate to the
// buffer-based blocked function.
// NOTE(review): the export dropped the signature line (741), part of the
// 'output' construction (line 745) and the 'buffers' declaration (line 752).
740template<typename Stat_ = double, typename Value_, typename Index_, typename Block_>
742 const auto nblocks = (block ? tatami_stats::total_groups(block, mat.ncol()) : 1);
743
// Averaging is enabled only if the deprecated flag is set AND the policy
// is not NONE (back-compatibility).
744 const bool do_average = options.compute_average /* for back-compatibility */ && options.block_average_policy != BlockAveragePolicy::NONE;
746 mat.nrow(), // cast is safe, any tatami Index_ can always fit into a size_t.
747 nblocks,
748 do_average,
749 options.trend
750 );
751
// Point one buffer set at each block's owning vectors.
753 sanisizer::resize(buffers.per_block, nblocks);
754 for (I<decltype(nblocks)> b = 0; b < nblocks; ++b) {
755 auto& current = buffers.per_block[b];
756 current.means = output.per_block[b].means.data();
757 current.variances = output.per_block[b].variances.data();
758
759 if (options.trend) {
760 current.fitted = output.per_block[b].fitted.data();
761 current.residuals = output.per_block[b].residuals.data();
762 } else {
763 current.fitted = NULL;
764 current.residuals = NULL;
765 }
766 }
767
// Average outputs are only wired up when averaging was requested.
768 if (!do_average) {
769 buffers.average.means = NULL;
770 buffers.average.variances = NULL;
771 buffers.average.fitted = NULL;
772 buffers.average.residuals = NULL;
773 } else {
774 buffers.average.means = output.average.means.data();
775 buffers.average.variances = output.average.variances.data();
776
777 if (options.trend) {
778 buffers.average.fitted = output.average.fitted.data();
779 buffers.average.residuals = output.average.residuals.data();
780 } else {
781 buffers.average.fitted = NULL;
782 buffers.average.residuals = NULL;
783 }
784 }
785
786 model_gene_variances_blocked(mat, block, buffers, options);
787 return output;
788}
789
790}
791
792#endif
virtual Index_ ncol() const=0
virtual Index_ nrow() const=0
virtual bool prefer_rows() const=0
virtual std::unique_ptr< MyopicSparseExtractor< Value_, Index_ > > sparse(bool row, const Options &opt) const=0
Fit a mean-variance trend to log-count data.
void compute_weights(const std::size_t num_blocks, const Size_ *const sizes, const WeightPolicy policy, const VariableWeightParameters &variable, Weight_ *const weights)
void parallel_weighted_means(const std::size_t n, std::vector< Stat_ * > in, const Weight_ *const w, Output_ *const out, const bool skip_nan)
void parallel_quantiles(const std::size_t n, const std::vector< Stat_ * > &in, const double quantile, Output_ *const out, const bool skip_nan)
Variance modelling for single-cell expression data.
Definition choose_highly_variable_genes.hpp:15
void model_gene_variances_blocked(const tatami::Matrix< Value_, Index_ > &mat, const Block_ *const block, const ModelGeneVariancesBlockedBuffers< Stat_ > &buffers, const ModelGeneVariancesOptions &options)
Definition model_gene_variances.hpp:553
void fit_variance_trend(const std::size_t n, const Float_ *const mean, const Float_ *const variance, Float_ *const fitted, Float_ *const residuals, FitVarianceTrendWorkspace< Float_ > &workspace, const FitVarianceTrendOptions &options)
Definition fit_variance_trend.hpp:131
void model_gene_variances(const tatami::Matrix< Value_, Index_ > &mat, ModelGeneVariancesBuffers< Stat_ > buffers, const ModelGeneVariancesOptions &options)
Definition model_gene_variances.hpp:674
BlockAveragePolicy
Definition model_gene_variances.hpp:31
int parallelize(Function_ fun, const Index_ tasks, const int workers)
Container_ create_container_of_Index_size(const Index_ x, Args_ &&... args)
auto consecutive_extractor(const Matrix< Value_, Index_ > &matrix, const bool row, const Index_ iter_start, const Index_ iter_length, Args_ &&... args)
Options for fit_variance_trend().
Definition fit_variance_trend.hpp:24
int num_threads
Definition fit_variance_trend.hpp:82
Workspace for fit_variance_trend().
Definition fit_variance_trend.hpp:91
Buffers for model_gene_variances_blocked().
Definition model_gene_variances.hpp:201
ModelGeneVariancesBuffers< Stat_ > average
Definition model_gene_variances.hpp:217
std::vector< ModelGeneVariancesBuffers< Stat_ > > per_block
Definition model_gene_variances.hpp:206
Results of model_gene_variances_blocked().
Definition model_gene_variances.hpp:225
std::vector< ModelGeneVariancesResults< Stat_ > > per_block
Definition model_gene_variances.hpp:246
ModelGeneVariancesResults< Stat_ > average
Definition model_gene_variances.hpp:252
Buffers for model_gene_variances() and friends.
Definition model_gene_variances.hpp:108
Stat_ * means
Definition model_gene_variances.hpp:112
Stat_ * residuals
Definition model_gene_variances.hpp:131
Stat_ * variances
Definition model_gene_variances.hpp:117
Stat_ * fitted
Definition model_gene_variances.hpp:124
Options for model_gene_variances() and friends.
Definition model_gene_variances.hpp:36
FitVarianceTrendOptions fit_variance_trend_options
Definition model_gene_variances.hpp:48
double block_quantile
Definition model_gene_variances.hpp:90
bool trend
Definition model_gene_variances.hpp:42
BlockAveragePolicy block_average_policy
Definition model_gene_variances.hpp:55
scran_blocks::VariableWeightParameters variable_block_weight_parameters
Definition model_gene_variances.hpp:74
int num_threads
Definition model_gene_variances.hpp:96
scran_blocks::WeightPolicy block_weight_policy
Definition model_gene_variances.hpp:67
Results of model_gene_variances().
Definition model_gene_variances.hpp:139
std::vector< Stat_ > fitted
Definition model_gene_variances.hpp:186
std::vector< Stat_ > means
Definition model_gene_variances.hpp:174
std::vector< Stat_ > residuals
Definition model_gene_variances.hpp:193
std::vector< Stat_ > variances
Definition model_gene_variances.hpp:179
bool sparse_ordered_index