#ifndef SCRAN_PCA_BLOCKED_PCA_HPP
#define SCRAN_PCA_BLOCKED_PCA_HPP

/**
 * @file blocked_pca.hpp
 * @brief Principal component analysis on single-cell data, blocked on batch or sample.
 */

#include <vector>
#include <cstddef>
#include <cmath>
#include <algorithm>
#include <numeric>
#include <utility>
#include <type_traits>

// Headers for the Eigen, tatami, tatami_stats, irlba and scran_blocks symbols used below.
#include "Eigen/Dense"
#include "sanisizer/sanisizer.hpp"
#include "tatami/tatami.hpp"
#include "tatami_stats/tatami_stats.hpp"
#include "irlba/irlba.hpp"
#include "scran_blocks/scran_blocks.hpp"

#include "utils.hpp"
template<typename Index_, class EigenVector_>
struct BlockingDetails {
    std::vector<Index_> block_size;

    bool weighted = false;

    typedef typename EigenVector_::Scalar Weight;

    std::vector<Weight> per_element_weight;
    Weight total_block_weight = 0;
    EigenVector_ expanded_weights;
};
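
// Count the number of cells in each block and, if a weighting policy is in effect,
// derive the per-cell weights that equalize each block's contribution to the PCA.
// The square roots of the per-cell weights are stored in 'expanded_weights' so that
// they can later be applied as a row scaling of the cells-by-genes matrix.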
template<class EigenVector_, typename Index_, typename Block_>
BlockingDetails<Index_, EigenVector_> compute_blocking_details(
    const Index_ ncells,
    const Block_* block,
    const scran_blocks::WeightPolicy block_weight_policy,
    const scran_blocks::VariableWeightParameters& variable_block_weight_parameters)
{
    BlockingDetails<Index_, EigenVector_> output;
    output.block_size = tatami_stats::tabulate_groups(block, ncells);
    if (block_weight_policy == scran_blocks::WeightPolicy::NONE) {
        return output;
    }

    const auto& block_size = output.block_size;
    const auto nblocks = block_size.size();
    output.weighted = true;
    auto& total_weight = output.total_block_weight;
    auto& element_weight = output.per_element_weight;
    sanisizer::resize(element_weight, nblocks);

    for (decltype(I(nblocks)) b = 0; b < nblocks; ++b) {
        const auto bsize = block_size[b];
        if (bsize) {
            // Each block gets a weight of 1 under the EQUAL policy, or a
            // size-dependent weight under the VARIABLE policy.
            typename EigenVector_::Scalar block_weight = 1;
            if (block_weight_policy == scran_blocks::WeightPolicy::VARIABLE) {
                block_weight = scran_blocks::compute_variable_weight(bsize, variable_block_weight_parameters);
            }
            element_weight[b] = block_weight / bsize;
            total_weight += block_weight;
        } else {
            element_weight[b] = 0;
        }
    }

    // Avoid problems with division by zero downstream.
    if (total_weight == 0) {
        total_weight = 1;
    }

    // Taking the square root, as the weights are applied to the matrix rows
    // and thus enter the cross-product (and the variances) twice.
    auto sqrt_weights = element_weight;
    for (auto& s : sqrt_weights) {
        s = std::sqrt(s);
    }

    auto& expanded = output.expanded_weights;
    sanisizer::resize(expanded, ncells);
    for (Index_ c = 0; c < ncells; ++c) {
        expanded.coeffRef(c) = sqrt_weights[block[c]];
    }

    return output;
}
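
// Compute the per-block means for a single gene from its sparse non-zero values,
// along with the (optionally weighted) variance of the residuals from those means.
// 'block_copy' is scratch space used to count the structural zeros left in each block.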
template<typename Num_, typename Value_, typename Index_, typename Block_, typename EigenVector_, typename Float_>
void compute_sparse_mean_and_variance_blocked(
    const Num_ num_nonzero,
    const Value_* values,
    const Index_* indices,
    const Block_* block,
    const BlockingDetails<Index_, EigenVector_>& block_details,
    Float_* centers,
    Float_& variance,
    std::vector<Index_>& block_copy,
    const Num_ num_all)
{
    const auto& block_size = block_details.block_size;
    const auto nblocks = block_size.size();

    // Computing the mean of this gene within each block.
    std::fill_n(centers, nblocks, 0);
    for (Num_ i = 0; i < num_nonzero; ++i) {
        centers[block[indices[i]]] += values[i];
    }
    for (decltype(I(nblocks)) b = 0; b < nblocks; ++b) {
        auto bsize = block_size[b];
        if (bsize) {
            centers[b] /= bsize;
        }
    }

    // Summing squared differences from the block means. 'block_copy' tracks the
    // number of structural zeros remaining in each block so that their
    // contribution can be added in bulk afterwards.
    variance = 0;
    std::copy(block_size.begin(), block_size.end(), block_copy.begin());

    if (block_details.weighted) {
        for (Num_ i = 0; i < num_nonzero; ++i) {
            const Block_ curb = block[indices[i]];
            const auto diff = values[i] - centers[curb];
            variance += diff * diff * block_details.per_element_weight[curb];
            --block_copy[curb];
        }
        for (decltype(I(nblocks)) b = 0; b < nblocks; ++b) {
            const auto val = centers[b];
            variance += val * val * block_copy[b] * block_details.per_element_weight[b];
        }
    } else {
        for (Num_ i = 0; i < num_nonzero; ++i) {
            const Block_ curb = block[indices[i]];
            const auto diff = values[i] - centers[curb];
            variance += diff * diff;
            --block_copy[curb];
        }
        for (decltype(I(nblocks)) b = 0; b < nblocks; ++b) {
            const auto val = centers[b];
            variance += val * val * block_copy[b];
        }
    }

    variance /= num_all - 1;
}
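
// Apply the per-gene sparse mean/variance calculation to every column of a realized
// irlba-style sparse matrix (cells in rows, genes in columns), parallelizing across genes.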
template<class IrlbaSparseMatrix_, typename Block_, class Index_, class EigenVector_, class EigenMatrix_>
void compute_blockwise_mean_and_variance_realized_sparse(
    const IrlbaSparseMatrix_& emat, // cells in rows, genes in columns.
    const Block_* block,
    const BlockingDetails<Index_, EigenVector_>& block_details,
    EigenMatrix_& centers,
    EigenVector_& variances,
    const int nthreads)
{
    const auto ngenes = emat.cols();
    tatami::parallelize([&](const int, const decltype(I(ngenes)) start, const decltype(I(ngenes)) length) -> void {
        const auto ncells = emat.rows();
        const auto& values = emat.get_values();
        const auto& indices = emat.get_indices();
        const auto& pointers = emat.get_pointers();

        const auto nblocks = block_details.block_size.size();
        static_assert(!EigenMatrix_::IsRowMajor);
        auto block_copy = sanisizer::create<std::vector<Index_> >(nblocks);

        for (decltype(I(start)) g = start, end = start + length; g < end; ++g) {
            const auto offset = pointers[g];
            const auto next_offset = pointers[g + 1];
            compute_sparse_mean_and_variance_blocked(
                static_cast<decltype(I(ncells))>(next_offset - offset),
                values.data() + offset,
                indices.data() + offset,
                block,
                block_details,
                centers.data() + sanisizer::product_unsafe<std::size_t>(g, nblocks),
                variances[g],
                block_copy,
                ncells
            );
        }
    }, ngenes, nthreads);
}
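
// Dense counterpart of the sparse calculation above: compute the per-block means for a
// single gene and the (optionally weighted) variance of the residuals from those means.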
template<typename Num_, typename Value_, typename Block_, typename Index_, typename EigenVector_, typename Float_>
void compute_dense_mean_and_variance_blocked(
    const Num_ number,
    const Value_* values,
    const Block_* block,
    const BlockingDetails<Index_, EigenVector_>& block_details,
    Float_* centers,
    Float_& variance)
{
    const auto& block_size = block_details.block_size;
    const auto nblocks = block_size.size();

    std::fill_n(centers, nblocks, 0);
    for (Num_ i = 0; i < number; ++i) {
        centers[block[i]] += values[i];
    }
    for (decltype(I(nblocks)) b = 0; b < nblocks; ++b) {
        const auto& bsize = block_size[b];
        if (bsize) {
            centers[b] /= bsize;
        }
    }

    variance = 0;
    if (block_details.weighted) {
        for (Num_ i = 0; i < number; ++i) {
            const auto curb = block[i];
            const auto delta = values[i] - centers[curb];
            variance += delta * delta * block_details.per_element_weight[curb];
        }
    } else {
        for (Num_ i = 0; i < number; ++i) {
            const auto curb = block[i];
            const auto delta = values[i] - centers[curb];
            variance += delta * delta;
        }
    }

    variance /= number - 1;
}
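
// Apply the dense per-gene calculation to every column of a realized dense matrix
// (cells in rows, genes in columns), parallelizing across genes.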
template<class EigenMatrix_, typename Block_, class Index_, class EigenVector_>
void compute_blockwise_mean_and_variance_realized_dense(
    const EigenMatrix_& emat, // cells in rows, genes in columns.
    const Block_* block,
    const BlockingDetails<Index_, EigenVector_>& block_details,
    EigenMatrix_& centers,
    EigenVector_& variances,
    const int nthreads)
{
    const auto ngenes = emat.cols();
    tatami::parallelize([&](const int, const decltype(I(ngenes)) start, const decltype(I(ngenes)) length) -> void {
        const auto ncells = emat.rows();
        static_assert(!EigenMatrix_::IsRowMajor);
        const auto nblocks = block_details.block_size.size();
        for (decltype(I(start)) g = start, end = start + length; g < end; ++g) {
            compute_dense_mean_and_variance_blocked(
                ncells,
                emat.data() + sanisizer::product_unsafe<std::size_t>(g, ncells),
                block,
                block_details,
                centers.data() + sanisizer::product_unsafe<std::size_t>(g, nblocks),
                variances[g]
            );
        }
    }, ngenes, nthreads);
}
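
// Compute the per-block means and residual variances directly from a tatami matrix
// without realizing it. Row-preferred matrices are processed gene by gene, while
// column-preferred matrices are handled with running statistics across cells.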
template<typename Value_, typename Index_, typename Block_, class EigenMatrix_, class EigenVector_>
void compute_blockwise_mean_and_variance_tatami(
    const tatami::Matrix<Value_, Index_>& mat, // genes in rows, cells in columns.
    const Block_* block,
    const BlockingDetails<Index_, EigenVector_>& block_details,
    EigenMatrix_& centers,
    EigenVector_& variances,
    const int nthreads)
{
    const auto& block_size = block_details.block_size;
    const auto nblocks = block_size.size();
    const Index_ ngenes = mat.nrow();
    const Index_ ncells = mat.ncol();

    if (mat.prefer_rows()) {
        tatami::parallelize([&](const int, const Index_ start, const Index_ length) -> void {
            static_assert(!EigenMatrix_::IsRowMajor);
            auto block_copy = sanisizer::create<std::vector<Index_> >(nblocks);
            std::vector<Value_> vbuffer(ncells);

            if (mat.is_sparse()) {
                std::vector<Index_> ibuffer(ncells);
                auto ext = tatami::consecutive_extractor<true>(mat, true, start, length);
                for (Index_ g = start, end = start + length; g < end; ++g) {
                    auto range = ext->fetch(vbuffer.data(), ibuffer.data());
                    compute_sparse_mean_and_variance_blocked(
                        range.number,
                        range.value,
                        range.index,
                        block,
                        block_details,
                        centers.data() + sanisizer::product_unsafe<std::size_t>(g, nblocks),
                        variances[g],
                        block_copy,
                        ncells
                    );
                }
            } else {
                auto ext = tatami::consecutive_extractor<false>(mat, true, start, length);
                for (Index_ g = start, end = start + length; g < end; ++g) {
                    auto ptr = ext->fetch(vbuffer.data());
                    compute_dense_mean_and_variance_blocked(
                        ncells,
                        ptr,
                        block,
                        block_details,
                        centers.data() + sanisizer::product_unsafe<std::size_t>(g, nblocks),
                        variances[g]
                    );
                }
            }
        }, ngenes, nthreads);

    } else {
        typedef typename EigenVector_::Scalar Scalar;

        // Multipliers to convert each block's within-block sum of squares into its
        // (weighted) contribution to the pooled variance.
        std::vector<std::pair<decltype(I(nblocks)), Scalar> > block_multipliers;
        block_multipliers.reserve(nblocks);
        for (decltype(I(nblocks)) b = 0; b < nblocks; ++b) {
            const auto bsize = block_size[b];
            if (bsize > 1) {
                Scalar mult = bsize - 1;
                if (block_details.weighted) {
                    mult *= block_details.per_element_weight[b];
                }
                block_multipliers.emplace_back(b, mult);
            }
        }

        tatami::parallelize([&](const int, const Index_ start, const Index_ length) -> void {
            std::vector<std::vector<Scalar> > re_centers, re_variances;
            re_centers.reserve(nblocks);
            re_variances.reserve(nblocks);
            for (decltype(I(nblocks)) b = 0; b < nblocks; ++b) {
                re_centers.emplace_back(length);
                re_variances.emplace_back(length);
            }

            std::vector<Value_> vbuffer(length);

            if (mat.is_sparse()) {
                std::vector<tatami_stats::variances::RunningSparse<Scalar, Value_, Index_> > running;
                running.reserve(nblocks);
                for (decltype(I(nblocks)) b = 0; b < nblocks; ++b) {
                    running.emplace_back(length, re_centers[b].data(), re_variances[b].data(), false, start);
                }

                std::vector<Index_> ibuffer(length);
                auto ext = tatami::consecutive_extractor<true>(mat, false, static_cast<Index_>(0), ncells, start, length);
                for (Index_ c = 0; c < ncells; ++c) {
                    const auto range = ext->fetch(vbuffer.data(), ibuffer.data());
                    running[block[c]].add(range.value, range.index, range.number);
                }

                for (decltype(I(nblocks)) b = 0; b < nblocks; ++b) {
                    running[b].finish();
                }

            } else {
                std::vector<tatami_stats::variances::RunningDense<Scalar, Value_, Index_> > running;
                running.reserve(nblocks);
                for (decltype(I(nblocks)) b = 0; b < nblocks; ++b) {
                    running.emplace_back(length, re_centers[b].data(), re_variances[b].data(), false);
                }

                auto ext = tatami::consecutive_extractor<false>(mat, false, static_cast<Index_>(0), ncells, start, length);
                for (Index_ c = 0; c < ncells; ++c) {
                    auto ptr = ext->fetch(vbuffer.data());
                    running[block[c]].add(ptr);
                }

                for (decltype(I(nblocks)) b = 0; b < nblocks; ++b) {
                    running[b].finish();
                }
            }

            static_assert(!EigenMatrix_::IsRowMajor);
            for (Index_ i = 0; i < length; ++i) {
                auto mptr = centers.data() + sanisizer::product_unsafe<std::size_t>(start + i, nblocks);
                for (decltype(I(nblocks)) b = 0; b < nblocks; ++b) {
                    mptr[b] = re_centers[b][i];
                }

                auto& my_var = variances[start + i];
                my_var = 0;
                for (const auto& bm : block_multipliers) {
                    my_var += re_variances[bm.first][i] * bm.second;
                }
                my_var /= ncells - 1;
            }
        }, ngenes, nthreads);
    }
}
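
// If scaling was requested, divide each gene's rotation coefficients by that gene's
// scaling factor so that the rotation can be applied to the unscaled data; otherwise
// return the rotation matrix as-is.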
template<class EigenMatrix_, class EigenVector_>
const EigenMatrix_& scale_rotation_matrix(const EigenMatrix_& rotation, const bool scale, const EigenVector_& scale_v, EigenMatrix_& tmp) {
    if (scale) {
        tmp = (rotation.array().colwise() / scale_v.array()).matrix();
        return tmp;
    } else {
        return rotation;
    }
}
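
// Project each cell onto the rotation vectors using a realized sparse matrix,
// accumulating the rank-by-cells product either serially or with one thread per
// slab of cells (using the matrix's precomputed per-thread nonzero boundaries).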
template<class IrlbaSparseMatrix_, class EigenMatrix_>
inline void project_matrix_realized_sparse(
    const IrlbaSparseMatrix_& emat, // cells in rows, genes in columns.
    EigenMatrix_& components,
    const EigenMatrix_& scaled_rotation,
    const int nthreads)
{
    const auto rank = scaled_rotation.cols();
    const auto ncells = emat.rows();
    const auto ngenes = emat.cols();

    components.resize(
        sanisizer::cast<decltype(I(components.rows()))>(rank),
        sanisizer::cast<decltype(I(components.cols()))>(ncells)
    ); // filled as rank-by-cells for now.
    components.setZero();

    const auto& values = emat.get_values();
    const auto& indices = emat.get_indices();

    if (nthreads == 1) {
        const auto& pointers = emat.get_pointers();
        auto multipliers = sanisizer::create<Eigen::VectorXd>(rank);
        for (decltype(I(ngenes)) g = 0; g < ngenes; ++g) {
            multipliers.noalias() = scaled_rotation.row(g);
            const auto start = pointers[g], end = pointers[g + 1];
            for (auto i = start; i < end; ++i) {
                components.col(indices[i]).noalias() += values[i] * multipliers;
            }
        }

    } else {
        const auto& row_nonzero_starts = emat.get_secondary_nonzero_starts();
        irlba::parallelize(nthreads, [&](const int t) -> void {
            const auto& starts = row_nonzero_starts[t];
            const auto& ends = row_nonzero_starts[t + 1];
            auto multipliers = sanisizer::create<Eigen::VectorXd>(rank);

            for (decltype(I(ngenes)) g = 0; g < ngenes; ++g) {
                multipliers.noalias() = scaled_rotation.row(g);
                const auto start = starts[g], end = ends[g];
                for (auto i = start; i < end; ++i) {
                    components.col(indices[i]).noalias() += values[i] * multipliers;
                }
            }
        });
    }
}
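
// Project each cell onto the rotation vectors directly from the tatami matrix
// (genes in rows, cells in columns), choosing the traversal order based on the
// matrix's preferred access pattern.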
template<typename Value_, typename Index_, class EigenMatrix_>
void project_matrix_transposed_tatami(
    const tatami::Matrix<Value_, Index_>& mat, // genes in rows, cells in columns.
    EigenMatrix_& components,
    const EigenMatrix_& scaled_rotation,
    const int nthreads)
{
    const auto rank = scaled_rotation.cols();
    const auto ngenes = mat.nrow();
    const auto ncells = mat.ncol();
    typedef typename EigenMatrix_::Scalar Scalar;

    components.resize(
        sanisizer::cast<decltype(I(components.rows()))>(rank),
        sanisizer::cast<decltype(I(components.cols()))>(ncells)
    ); // filled as rank-by-cells for now.

    if (mat.prefer_rows()) {
        tatami::parallelize([&](const int, const Index_ start, const Index_ length) -> void {
            static_assert(!EigenMatrix_::IsRowMajor);
            const auto vptr = scaled_rotation.data();
            std::vector<Value_> vbuffer(length);

            // Accumulating each component's contribution in thread-local buffers.
            std::vector<std::vector<Scalar> > local_buffers;
            local_buffers.reserve(rank);
            for (decltype(I(rank)) r = 0; r < rank; ++r) {
                local_buffers.emplace_back(length);
            }

            if (mat.is_sparse()) {
                std::vector<Index_> ibuffer(length);
                auto ext = tatami::consecutive_extractor<true>(mat, true, static_cast<Index_>(0), ngenes, start, length);
                for (Index_ g = 0; g < ngenes; ++g) {
                    const auto range = ext->fetch(vbuffer.data(), ibuffer.data());
                    for (decltype(I(rank)) r = 0; r < rank; ++r) {
                        const auto mult = vptr[sanisizer::nd_offset<std::size_t>(g, ngenes, r)];
                        auto& local_buffer = local_buffers[r];
                        for (Index_ i = 0; i < range.number; ++i) {
                            local_buffer[range.index[i] - start] += range.value[i] * mult;
                        }
                    }
                }

            } else {
                auto ext = tatami::consecutive_extractor<false>(mat, true, static_cast<Index_>(0), ngenes, start, length);
                for (Index_ g = 0; g < ngenes; ++g) {
                    const auto ptr = ext->fetch(vbuffer.data());
                    for (decltype(I(rank)) r = 0; r < rank; ++r) {
                        const auto mult = vptr[sanisizer::nd_offset<std::size_t>(g, ngenes, r)];
                        auto& local_buffer = local_buffers[r];
                        for (Index_ i = 0; i < length; ++i) {
                            local_buffer[i] += ptr[i] * mult;
                        }
                    }
                }
            }

            for (decltype(I(rank)) r = 0; r < rank; ++r) {
                for (Index_ c = 0; c < length; ++c) {
                    components.coeffRef(r, c + start) = local_buffers[r][c];
                }
            }
        }, ncells, nthreads);

    } else {
        tatami::parallelize([&](const int, const Index_ start, const Index_ length) -> void {
            static_assert(!EigenMatrix_::IsRowMajor);
            std::vector<Value_> vbuffer(ngenes);

            if (mat.is_sparse()) {
                std::vector<Index_> ibuffer(ngenes);
                auto ext = tatami::consecutive_extractor<true>(mat, false, start, length);
                for (Index_ c = start, end = start + length; c < end; ++c) {
                    const auto range = ext->fetch(vbuffer.data(), ibuffer.data());
                    for (decltype(I(rank)) r = 0; r < rank; ++r) {
                        auto& output = components.coeffRef(r, c);
                        output = 0;
                        const auto rotptr = scaled_rotation.data() + sanisizer::product_unsafe<std::size_t>(r, ngenes);
                        for (Index_ i = 0; i < range.number; ++i) {
                            output += rotptr[range.index[i]] * range.value[i];
                        }
                    }
                }

            } else {
                auto ext = tatami::consecutive_extractor<false>(mat, false, start, length);
                for (Index_ c = start, end = start + length; c < end; ++c) {
                    const auto ptr = ext->fetch(vbuffer.data());
                    for (decltype(I(rank)) r = 0; r < rank; ++r) {
                        const auto rotptr = scaled_rotation.data() + sanisizer::product_unsafe<std::size_t>(r, ngenes);
                        components.coeffRef(r, c) = std::inner_product(rotptr, rotptr + ngenes, ptr, static_cast<Scalar>(0));
                    }
                }
            }
        }, ncells, nthreads);
    }
}
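
// Center each component across cells and recompute the variance explained from the
// projected coordinates, as the projection is not guaranteed to yield zero-mean components.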
template<class EigenMatrix_, class EigenVector_>
void clean_up_projected(EigenMatrix_& projected, EigenVector_& D) {
    for (decltype(I(projected.rows())) i = 0, prows = projected.rows(); i < prows; ++i) {
        projected.row(i).array() -= projected.row(i).sum() / projected.cols();
    }

    const typename EigenMatrix_::Scalar denom = projected.cols() - 1;
    for (decltype(I(projected.rows())) i = 0, prows = projected.rows(); i < prows; ++i) {
        D.coeffRef(i) = projected.row(i).squaredNorm() / denom;
    }
}
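
// irlba-compatible wrapper that subtracts the block-specific means from an underlying
// matrix on the fly, i.e., it behaves like the matrix of residuals without ever
// realizing those residuals.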
template<class Matrix_, typename Block_, class EigenMatrix_, class EigenVector_>
class ResidualWrapper {
public:
    ResidualWrapper(const Matrix_& mat, const Block_* block, const EigenMatrix_& means) : my_mat(mat), my_block(block), my_means(means) {}

public:
    Eigen::Index rows() const { return my_mat.rows(); }
    Eigen::Index cols() const { return my_mat.cols(); }

public:
    struct Workspace {
        template<typename NumBlocks_>
        Workspace(const NumBlocks_ nblocks, irlba::WrappedWorkspace<Matrix_> c) :
            sub(sanisizer::cast<decltype(I(sub.size()))>(nblocks)),
            child(std::move(c))
        {}

        EigenVector_ sub;
        EigenVector_ holding;
        irlba::WrappedWorkspace<Matrix_> child;
    };

    Workspace workspace() const {
        return Workspace(my_means.rows(), irlba::wrapped_workspace(my_mat));
    }

    template<class Right_>
    void multiply(const Right_& rhs, Workspace& work, EigenVector_& output) const {
        const auto& realized_rhs = [&]() -> const auto& {
            if constexpr(std::is_same<Right_, EigenVector_>::value) {
                return rhs;
            } else {
                work.holding.noalias() = rhs;
                return work.holding;
            }
        }();

        irlba::wrapped_multiply(my_mat, realized_rhs, work.child, output);

        work.sub.noalias() = my_means * realized_rhs;
        for (decltype(I(output.size())) i = 0, end = output.size(); i < end; ++i) {
            auto& val = output.coeffRef(i);
            val -= work.sub.coeff(my_block[i]);
        }
    }

public:
    struct AdjointWorkspace {
        template<typename NumBlocks_>
        AdjointWorkspace(const NumBlocks_ nblocks, irlba::WrappedAdjointWorkspace<Matrix_> c) :
            aggr(sanisizer::cast<decltype(I(aggr.size()))>(nblocks)),
            child(std::move(c))
        {}

        EigenVector_ aggr;
        EigenVector_ holding;
        irlba::WrappedAdjointWorkspace<Matrix_> child;
    };

    AdjointWorkspace adjoint_workspace() const {
        return AdjointWorkspace(my_means.rows(), irlba::wrapped_adjoint_workspace(my_mat));
    }

    template<class Right_>
    void adjoint_multiply(const Right_& rhs, AdjointWorkspace& work, EigenVector_& output) const {
        const auto& realized_rhs = [&]() {
            if constexpr(std::is_same<Right_, EigenVector_>::value) {
                return rhs;
            } else {
                work.holding.noalias() = rhs;
                return work.holding;
            }
        }();

        irlba::wrapped_adjoint_multiply(my_mat, realized_rhs, work.child, output);

        work.aggr.setZero();
        for (decltype(I(realized_rhs.size())) i = 0, end = realized_rhs.size(); i < end; ++i) {
            work.aggr.coeffRef(my_block[i]) += realized_rhs.coeff(i);
        }

        output.noalias() -= my_means.adjoint() * work.aggr;
    }

public:
    template<class EigenMatrix2_>
    EigenMatrix2_ realize() const {
        auto output = irlba::wrapped_realize<EigenMatrix2_>(my_mat);
        for (decltype(I(output.rows())) i = 0, end = output.rows(); i < end; ++i) {
            output.row(i) -= my_means.row(my_block[i]);
        }
        return output;
    }

private:
    const Matrix_& my_mat;
    const Block_* my_block;
    const EigenMatrix_& my_means;
};
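
// Drive the entire blocked PCA: realize the input matrix (if requested), compute the
// block-wise means and residual variances, run IRLBA on the centered (and optionally
// scaled/weighted) matrix, and post-process the rotation and component scores.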
template<bool realize_matrix_, bool sparse_, typename Value_, typename Index_, typename Block_, class EigenMatrix_, class EigenVector_>
void run_blocked(
    const tatami::Matrix<Value_, Index_>& mat, // genes in rows, cells in columns.
    const Block_* block,
    const BlockingDetails<Index_, EigenVector_>& block_details,
    const BlockedPcaOptions& options,
    EigenMatrix_& components,
    EigenMatrix_& rotation,
    EigenVector_& variance_explained,
    EigenMatrix_& center_m,
    EigenVector_& scale_v,
    typename EigenVector_::Scalar& total_var,
    bool& converged)
{
    Index_ ngenes = mat.nrow(), ncells = mat.ncol();

    auto emat = [&]() {
        if constexpr(!realize_matrix_) {
            // Use the tatami matrix as-is, with an implicit transposition to put cells in the rows.
            return internal::TransposedTatamiWrapper<EigenVector_, Value_, Index_>(mat, options.num_threads);

        } else if constexpr(sparse_) {
            // Realize into a cells-by-genes compressed sparse column matrix.
            tatami::RetrieveCompressedSparseContentsOptions copt;
            copt.num_threads = options.num_threads;
            auto extracted = tatami::retrieve_compressed_sparse_contents<typename EigenVector_::Scalar, Index_>(mat, /* row = */ true, copt);
            return irlba::ParallelSparseMatrix(ncells, ngenes, std::move(extracted.value), std::move(extracted.index), std::move(extracted.pointers), true, options.num_threads);

        } else {
            // Realize into a dense column-major matrix with cells in the rows.
            EigenMatrix_ realized(
                sanisizer::cast<decltype(I(std::declval<EigenMatrix_>().rows()))>(ncells),
                sanisizer::cast<decltype(I(std::declval<EigenMatrix_>().cols()))>(ngenes)
            );
            static_assert(!EigenMatrix_::IsRowMajor);
            tatami::ConvertToDenseOptions opt;
            opt.num_threads = options.num_threads;
            tatami::convert_to_dense(mat, /* row_major = */ true, realized.data(), opt);
            return realized;
        }
    }();

    const auto nblocks = block_details.block_size.size();
    center_m.resize(
        sanisizer::cast<decltype(I(center_m.rows()))>(nblocks),
        sanisizer::cast<decltype(I(center_m.cols()))>(ngenes)
    );
    sanisizer::resize(scale_v, ngenes);

    if constexpr(!realize_matrix_) {
        compute_blockwise_mean_and_variance_tatami(mat, block, block_details, center_m, scale_v, options.num_threads);
    } else if constexpr(sparse_) {
        compute_blockwise_mean_and_variance_realized_sparse(emat, block, block_details, center_m, scale_v, options.num_threads);
    } else {
        compute_blockwise_mean_and_variance_realized_dense(emat, block, block_details, center_m, scale_v, options.num_threads);
    }
    total_var = internal::process_scale_vector(options.scale, scale_v);

    ResidualWrapper<decltype(I(emat)), Block_, EigenMatrix_, EigenVector_> centered(emat, block, center_m);

    if (block_details.weighted) {
        if (options.scale) {
            irlba::Scaled<true, decltype(I(centered)), EigenVector_> scaled(centered, scale_v, true);
            irlba::Scaled<false, decltype(I(scaled)), EigenVector_> weighted(scaled, block_details.expanded_weights, false);
            auto out = irlba::compute(weighted, options.number, components, rotation, variance_explained, options.irlba_options);
            converged = out.first;
        } else {
            irlba::Scaled<false, decltype(I(centered)), EigenVector_> weighted(centered, block_details.expanded_weights, false);
            auto out = irlba::compute(weighted, options.number, components, rotation, variance_explained, options.irlba_options);
            converged = out.first;
        }

        // The scores from the weighted decomposition are not reported directly; instead,
        // all cells are projected onto the rotation vectors so that the reported scores
        // are unaffected by the block weights.
        EigenMatrix_ tmp;
        const auto& scaled_rotation = scale_rotation_matrix(rotation, options.scale, scale_v, tmp);

        if constexpr(!realize_matrix_) {
            project_matrix_transposed_tatami(mat, components, scaled_rotation, options.num_threads);
        } else if constexpr(sparse_) {
            project_matrix_realized_sparse(emat, components, scaled_rotation, options.num_threads);
        } else {
            components.noalias() = (emat * scaled_rotation).adjoint();
        }

        if (options.components_from_residuals) {
            EigenMatrix_ centering = (center_m * scaled_rotation).adjoint();
            for (decltype(I(ncells)) c = 0; c < ncells; ++c) {
                components.col(c) -= centering.col(block[c]);
            }
        }

        clean_up_projected(components, variance_explained);
        if (!options.transpose) {
            components.adjointInPlace();
        }

    } else {
        if (options.scale) {
            irlba::Scaled<true, decltype(I(centered)), EigenVector_> scaled(centered, scale_v, true);
            const auto out = irlba::compute(scaled, options.number, components, rotation, variance_explained, options.irlba_options);
            converged = out.first;
        } else {
            const auto out = irlba::compute(centered, options.number, components, rotation, variance_explained, options.irlba_options);
            converged = out.first;
        }

        if (options.components_from_residuals) {
            internal::clean_up(mat.ncol(), components, variance_explained);
            if (options.transpose) {
                components.adjointInPlace();
            }
        } else {
            // Project the original (uncentered) data onto the rotation vectors.
            EigenMatrix_ tmp;
            const auto& scaled_rotation = scale_rotation_matrix(rotation, options.scale, scale_v, tmp);

            if constexpr(!realize_matrix_) {
                project_matrix_transposed_tatami(mat, components, scaled_rotation, options.num_threads);
            } else if constexpr(sparse_) {
                project_matrix_realized_sparse(emat, components, scaled_rotation, options.num_threads);
            } else {
                components.noalias() = (emat * scaled_rotation).adjoint();
            }

            clean_up_projected(components, variance_explained);
            if (!options.transpose) {
                components.adjointInPlace();
            }
        }
    }
}
/**
 * @brief Results of blocked_pca().
 */
template<typename EigenMatrix_, typename EigenVector_>
struct BlockedPcaResults {
    /**
     * Matrix of principal component scores for each cell.
     */
    EigenMatrix_ components;

    /**
     * Variance explained by each component.
     */
    EigenVector_ variance_explained;

    /**
     * Total variance in the dataset.
     */
    typename EigenVector_::Scalar total_variance = 0;

    /**
     * Rotation matrix, with one row per gene and one column per component.
     */
    EigenMatrix_ rotation;

    /**
     * Matrix of per-block centers, with one row per block and one column per gene.
     */
    EigenMatrix_ center;

    /**
     * Per-gene scaling factors; only filled if scaling was requested in the options.
     */
    EigenVector_ scale;

    /**
     * Whether the IRLBA iterations converged.
     */
    bool converged = false;
};
/**
 * Run a blocked PCA on a genes-by-cells matrix, where the block-specific means are
 * regressed out before the rotation vectors are computed. 'block' should contain the
 * block assignment for each cell (column) of 'mat'.
 */
template<typename Value_, typename Index_, typename Block_, typename EigenMatrix_, class EigenVector_>
void blocked_pca(const tatami::Matrix<Value_, Index_>& mat, const Block_* block, const BlockedPcaOptions& options, BlockedPcaResults<EigenMatrix_, EigenVector_>& output) {
    EigenMatrix_& components = output.components;
    EigenMatrix_& rotation = output.rotation;
    EigenVector_& variance_explained = output.variance_explained;
    EigenMatrix_& center_m = output.center;
    EigenVector_& scale_v = output.scale;
    auto& total_var = output.total_variance;
    auto& converged = output.converged;

    const auto bdetails = internal::compute_blocking_details<EigenVector_>(
        mat.ncol(), block, options.block_weight_policy, options.variable_block_weight_parameters);

    if (mat.is_sparse()) {
        if (options.realize_matrix) {
            internal::run_blocked<true, true>(mat, block, bdetails, options, components, rotation, variance_explained, center_m, scale_v, total_var, converged);
        } else {
            internal::run_blocked<false, true>(mat, block, bdetails, options, components, rotation, variance_explained, center_m, scale_v, total_var, converged);
        }
    } else {
        if (options.realize_matrix) {
            internal::run_blocked<true, false>(mat, block, bdetails, options, components, rotation, variance_explained, center_m, scale_v, total_var, converged);
        } else {
            internal::run_blocked<false, false>(mat, block, bdetails, options, components, rotation, variance_explained, center_m, scale_v, total_var, converged);
        }
    }

    if (!options.scale) {
        output.scale = EigenVector_();
    }
}
/**
 * Overload of blocked_pca() that allocates and returns the results.
 */
template<typename EigenMatrix_ = Eigen::MatrixXd, class EigenVector_ = Eigen::VectorXd, typename Value_, typename Index_, typename Block_>
BlockedPcaResults<EigenMatrix_, EigenVector_> blocked_pca(const tatami::Matrix<Value_, Index_>& mat, const Block_* block, const BlockedPcaOptions& options) {
    BlockedPcaResults<EigenMatrix_, EigenVector_> output;
    blocked_pca(mat, block, options, output);
    return output;
}
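
/*
 * Example usage (a minimal sketch): 'mat' is assumed to be a tatami::Matrix of
 * log-expression values with genes in rows and cells in columns, and 'block' is
 * assumed to be a std::vector with one block assignment per cell. Both are
 * placeholders, as is the chosen number of components.
 *
 *     BlockedPcaOptions opt;
 *     opt.number = 20; // number of PCs to retain.
 *     BlockedPcaResults<Eigen::MatrixXd, Eigen::VectorXd> res;
 *     blocked_pca(mat, block.data(), opt, res);
 *     // res.components holds the PC scores, res.variance_explained the variance
 *     // explained by each PC, and res.rotation the rotation matrix.
 */

#endif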