1#ifndef SCRAN_PCA_BLOCKED_PCA_HPP
2#define SCRAN_PCA_BLOCKED_PCA_HPP
16#include "sanisizer/sanisizer.hpp"
// NOTE(review): this file is a garbled extraction — original line numbers are
// embedded in the code and interior lines are missing throughout. Comments
// below describe only what the visible fragments establish.
//
// Bookkeeping for block-wise weighting during blocked PCA. Populated by
// compute_blocking_details() and consumed by the mean/variance and
// projection helpers below.
113template<
typename Index_,
class EigenVector_>
114struct BlockingDetails {
    // Number of cells in each block, indexed by block ID.
115 std::vector<Index_> block_size;
    // True when a non-trivial weighting policy is in effect; set by
    // compute_blocking_details() when the policy is not WeightPolicy::NONE.
117 bool weighted =
false;
118 typedef typename EigenVector_::Scalar Weight;
    // Weight applied to each individual element of a given block
    // (block weight divided by block size — see compute_blocking_details).
121 std::vector<Weight> per_element_weight;
    // Sum of block weights across all non-empty blocks.
122 Weight total_block_weight = 0;
    // Per-cell weights expanded to length ncells; presumably the square root
    // of per_element_weight per cell's block — TODO confirm (the sqrt loop
    // body is missing from this extraction).
123 EigenVector_ expanded_weights;
// Tabulate block sizes and derive per-element/expanded weights for the
// requested weighting policy. Returns early (lines missing here) when the
// policy is WeightPolicy::NONE.
126template<
class EigenVector_,
typename Index_,
typename Block_>
127BlockingDetails<Index_, EigenVector_> compute_blocking_details(
133 BlockingDetails<Index_, EigenVector_> output;
    // Count cells per block.
134 output.block_size = tatami_stats::tabulate_groups(block, ncells);
135 if (block_weight_policy == scran_blocks::WeightPolicy::NONE) {
139 const auto& block_size = output.block_size;
140 const auto nblocks = block_size.size();
141 output.weighted =
true;
142 auto& total_weight = output.total_block_weight;
143 auto& element_weight = output.per_element_weight;
144 sanisizer::resize(element_weight, nblocks);
146 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
147 const auto bsize = block_size[b];
        // Equal weighting starts each block at 1; VARIABLE rescales it
        // (the rescaling line is missing from this extraction).
153 typename EigenVector_::Scalar block_weight = 1;
154 if (block_weight_policy == scran_blocks::WeightPolicy::VARIABLE) {
        // Spread the block's weight evenly across its elements.
158 element_weight[b] = block_weight / bsize;
159 total_weight += block_weight;
        // Presumably the empty-block branch: zero weight — TODO confirm.
161 element_weight[b] = 0;
166 if (total_weight == 0) {
    // Expand per-block weights to per-cell values; the loop over
    // sqrt_weights (body missing) presumably takes the square root so that
    // the weights apply to the matrix rather than its cross-product.
171 auto sqrt_weights = element_weight;
172 for (
auto& s : sqrt_weights) {
176 auto& expanded = output.expanded_weights;
177 sanisizer::resize(expanded, ncells);
178 for (Index_ c = 0; c < ncells; ++c) {
179 expanded.coeffRef(c) = sqrt_weights[block[c]];
// Compute the blocked mean(s) and a single pooled variance for one gene's
// sparse non-zero values. `centers` receives one mean per block; `variance`
// accumulates weighted squared deviations, treating structural zeros via
// the per-block count of remaining zeros in `block_copy`.
189template<
typename Num_,
typename Value_,
typename Index_,
typename Block_,
typename EigenVector_,
typename Float_>
190void compute_sparse_mean_and_variance_blocked(
191 const Num_ num_nonzero,
192 const Value_* values,
193 const Index_* indices,
195 const BlockingDetails<Index_, EigenVector_>& block_details,
198 std::vector<Index_>& block_copy,
201 const auto& block_size = block_details.block_size;
202 const auto nblocks = block_size.size();
    // First pass: per-block sums of the non-zero values.
204 std::fill_n(centers, nblocks, 0);
205 for (Num_ i = 0; i < num_nonzero; ++i) {
206 centers[block[indices[i]]] += values[i];
    // Convert sums to means (division lines missing from this extraction).
208 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
209 auto bsize = block_size[b];
    // block_copy starts as the full block sizes; presumably decremented per
    // observed non-zero (lines missing) so it ends as the zero counts.
220 std::copy(block_size.begin(), block_size.end(), block_copy.begin());
222 if (block_details.weighted) {
        // Weighted branch: squared deviations of non-zeros...
223 for (Num_ i = 0; i < num_nonzero; ++i) {
224 const Block_ curb = block[indices[i]];
225 const auto diff = values[i] - centers[curb];
226 variance += diff * diff * block_details.per_element_weight[curb];
        // ...plus the contribution of the structural zeros (deviation from
        // the mean is just the mean itself), weighted per block.
229 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
230 const auto val = centers[b];
231 variance += val * val * block_copy[b] * block_details.per_element_weight[b];
        // Unweighted branch: same accumulation without weights.
234 for (Num_ i = 0; i < num_nonzero; ++i) {
235 const Block_ curb = block[indices[i]];
236 const auto diff = values[i] - centers[curb];
237 variance += diff * diff;
240 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
241 const auto val = centers[b];
242 variance += val * val * block_copy[b];
    // Sample-variance style denominator over all cells.
255 variance /= num_all - 1;
// Parallel driver over genes for a realized in-memory sparse matrix
// (genes in columns): delegates each gene's CSC slice to
// compute_sparse_mean_and_variance_blocked().
258template<
class IrlbaSparseMatrix_,
typename Block_,
class Index_,
class EigenVector_,
class EigenMatrix_>
259void compute_blockwise_mean_and_variance_realized_sparse(
260 const IrlbaSparseMatrix_& emat,
262 const BlockingDetails<Index_, EigenVector_>& block_details,
263 EigenMatrix_& centers,
264 EigenVector_& variances,
267 const auto ngenes = emat.cols();
268 tatami::parallelize([&](
const int,
const I<
decltype(ngenes)> start,
const I<
decltype(ngenes)> length) ->
void {
269 const auto ncells = emat.rows();
270 const auto& values = emat.get_values();
271 const auto& indices = emat.get_indices();
272 const auto& pointers = emat.get_pointers();
274 const auto nblocks = block_details.block_size.size();
    // Column-major layout assumed so centers can be addressed as
    // contiguous per-gene strips of length nblocks.
275 static_assert(!EigenMatrix_::IsRowMajor);
    // Thread-local scratch for the zero counts.
276 auto block_copy = sanisizer::create<std::vector<Index_> >(nblocks);
278 for (I<
decltype(start)> g = start, end = start + length; g < end; ++g) {
279 const auto offset = pointers[g];
280 const auto next_offset = pointers[g + 1];
281 compute_sparse_mean_and_variance_blocked(
282 static_cast<I<decltype(ncells)
> >(next_offset - offset),
283 values.data() + offset,
284 indices.data() + offset,
287 centers.data() + sanisizer::product_unsafe<std::size_t>(g, nblocks),
293 }, ngenes, nthreads);
// Dense counterpart of compute_sparse_mean_and_variance_blocked(): blocked
// means plus one pooled variance for a single gene's dense value array.
296template<
typename Num_,
typename Value_,
typename Block_,
typename Index_,
typename EigenVector_,
typename Float_>
297void compute_dense_mean_and_variance_blocked(
299 const Value_* values,
301 const BlockingDetails<Index_, EigenVector_>& block_details,
305 const auto& block_size = block_details.block_size;
306 const auto nblocks = block_size.size();
    // Per-block sums, then (lines missing) converted to means.
307 std::fill_n(centers, nblocks, 0);
308 for (Num_ i = 0; i < number; ++i) {
309 centers[block[i]] += values[i];
311 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
312 const auto& bsize = block_size[b];
320 if (block_details.weighted) {
        // Weighted squared deviations from each cell's block mean.
321 for (Num_ i = 0; i < number; ++i) {
322 const auto curb = block[i];
323 const auto delta = values[i] - centers[curb];
324 variance += delta * delta * block_details.per_element_weight[curb];
        // Unweighted branch.
327 for (Num_ i = 0; i < number; ++i) {
328 const auto curb = block[i];
329 const auto delta = values[i] - centers[curb];
330 variance += delta * delta;
    // Sample-variance denominator.
334 variance /= number - 1;
// Parallel driver over genes for a realized dense (column-major,
// cells x genes) matrix; delegates each gene's column to
// compute_dense_mean_and_variance_blocked().
337template<
class EigenMatrix_,
typename Block_,
class Index_,
class EigenVector_>
338void compute_blockwise_mean_and_variance_realized_dense(
339 const EigenMatrix_& emat,
341 const BlockingDetails<Index_, EigenVector_>& block_details,
342 EigenMatrix_& centers,
343 EigenVector_& variances,
346 const auto ngenes = emat.cols();
347 tatami::parallelize([&](
const int,
const I<
decltype(ngenes)> start,
const I<
decltype(ngenes)> length) ->
void {
348 const auto ncells = emat.rows();
    // Column-major layout required for the raw-pointer column arithmetic.
349 static_assert(!EigenMatrix_::IsRowMajor);
350 const auto nblocks = block_details.block_size.size();
351 for (I<
decltype(start)> g = start, end = start + length; g < end; ++g) {
352 compute_dense_mean_and_variance_blocked(
        // Gene g's column of values, and its strip of per-block centers.
354 emat.data() + sanisizer::product_unsafe<std::size_t>(g, ncells),
357 centers.data() + sanisizer::product_unsafe<std::size_t>(g, nblocks),
361 }, ngenes, nthreads);
// Blocked means/variances computed directly from a tatami matrix without
// realizing it. The visible fragments show two regimes: gene-major
// extraction (sparse/dense fetch per gene) and cell-major extraction using
// tatami_stats running statistics per block, recombined at the end via
// per-block multipliers.
364template<
typename Value_,
typename Index_,
typename Block_,
class EigenMatrix_,
class EigenVector_>
365void compute_blockwise_mean_and_variance_tatami(
368 const BlockingDetails<Index_, EigenVector_>& block_details,
369 EigenMatrix_& centers,
370 EigenVector_& variances,
373 const auto& block_size = block_details.block_size;
374 const auto nblocks = block_size.size();
375 const Index_ ngenes = mat.
nrow();
376 const Index_ ncells = mat.
ncol();
    // --- gene-major path (missing lines presumably branch on prefer_rows) ---
380 static_assert(!EigenMatrix_::IsRowMajor);
381 auto block_copy = sanisizer::create<std::vector<Index_> >(nblocks);
    // Sparse per-gene fetch.
387 for (Index_ g = start, end = start + length; g < end; ++g) {
388 auto range = ext->fetch(vbuffer.data(), ibuffer.data());
389 compute_sparse_mean_and_variance_blocked(
395 centers.data() + sanisizer::product_unsafe<std::size_t>(g, nblocks),
    // Dense per-gene fetch.
404 for (Index_ g = start, end = start + length; g < end; ++g) {
405 auto ptr = ext->fetch(vbuffer.data());
406 compute_dense_mean_and_variance_blocked(
411 centers.data() + sanisizer::product_unsafe<std::size_t>(g, nblocks),
416 }, ngenes, nthreads);
    // --- cell-major path: running per-block statistics ---
419 typedef typename EigenVector_::Scalar Scalar;
    // Per-block multiplier to recombine per-block variances into one pooled
    // variance: (bsize - 1), optionally scaled by the per-element weight.
420 std::vector<std::pair<I<
decltype(nblocks)>, Scalar> > block_multipliers;
421 block_multipliers.reserve(nblocks);
423 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
424 const auto bsize = block_size[b];
426 Scalar mult = bsize - 1;
427 if (block_details.weighted) {
428 mult *= block_details.per_element_weight[b];
430 block_multipliers.emplace_back(b, mult);
    // Thread-local per-block accumulators over this thread's gene range.
435 std::vector<std::vector<Scalar> > re_centers, re_variances;
436 re_centers.reserve(nblocks);
437 re_variances.reserve(nblocks);
438 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
439 re_centers.emplace_back(length);
440 re_variances.emplace_back(length);
    // Sparse running statistics, one accumulator per block.
446 std::vector<tatami_stats::variances::RunningSparse<Scalar, Value_, Index_> > running;
447 running.reserve(nblocks);
448 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
449 running.emplace_back(length, re_centers[b].data(), re_variances[b].data(),
false, start);
    // Stream cells, routing each to its block's accumulator.
454 for (Index_ c = 0; c < ncells; ++c) {
455 const auto range = ext->fetch(vbuffer.data(), ibuffer.data());
456 running[block[c]].add(range.value, range.index, range.number);
    // Finalization loop (body missing from this extraction).
459 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
    // Dense equivalent of the running accumulation above.
464 std::vector<tatami_stats::variances::RunningDense<Scalar, Value_, Index_> > running;
465 running.reserve(nblocks);
466 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
467 running.emplace_back(length, re_centers[b].data(), re_variances[b].data(),
false);
471 for (Index_ c = 0; c < ncells; ++c) {
472 auto ptr = ext->fetch(vbuffer.data());
473 running[block[c]].add(ptr);
476 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
    // Copy per-block results into the output center matrix and combine
    // per-block variances with the multipliers into one pooled value.
481 static_assert(!EigenMatrix_::IsRowMajor);
482 for (Index_ i = 0; i < length; ++i) {
483 auto mptr = centers.data() + sanisizer::product_unsafe<std::size_t>(start + i, nblocks);
484 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
485 mptr[b] = re_centers[b][i];
488 auto& my_var = variances[start + i];
490 for (
const auto& bm : block_multipliers) {
491 my_var += re_variances[bm.first][i] * bm.second;
493 my_var /= ncells - 1;
495 }, ngenes, nthreads);
// Divide each rotation column element-wise by the scale vector when scaling
// is enabled, storing the result in caller-provided `tmp`. The no-scaling
// branch (presumably returning `rotation` unchanged) is missing from this
// extraction.
503template<
class EigenMatrix_,
class EigenVector_>
504const EigenMatrix_& scale_rotation_matrix(
const EigenMatrix_& rotation,
bool scale,
const EigenVector_& scale_v, EigenMatrix_& tmp) {
506 tmp = (rotation.array().colwise() / scale_v.array()).matrix();
// Project a realized sparse matrix onto the scaled rotation vectors,
// producing components as a rank x ncells matrix. Two visible variants:
// a serial pass over all genes, and a per-thread pass restricted by
// secondary non-zero boundaries so threads own disjoint row stripes.
513template<
class IrlbaSparseMatrix_,
class EigenMatrix_>
514inline void project_matrix_realized_sparse(
515 const IrlbaSparseMatrix_& emat,
516 EigenMatrix_& components,
517 const EigenMatrix_& scaled_rotation,
520 const auto rank = scaled_rotation.cols();
521 const auto ncells = emat.rows();
522 const auto ngenes = emat.cols();
    // Resize-to (rank, ncells); the surrounding resize call is missing.
526 sanisizer::cast<I<
decltype(components.rows())> >(rank),
527 sanisizer::cast<I<
decltype(components.cols())> >(ncells)
529 components.setZero();
531 const auto& values = emat.get_values();
532 const auto& indices = emat.get_indices();
    // --- serial variant ---
535 const auto& pointers = emat.get_pointers();
536 auto multipliers = sanisizer::create<Eigen::VectorXd>(rank);
537 for (I<
decltype(ngenes)> g = 0; g < ngenes; ++g) {
    // Gene g's rotation row, broadcast into each non-zero cell's column.
538 multipliers.noalias() = scaled_rotation.row(g);
539 const auto start = pointers[g], end = pointers[g + 1];
540 for (
auto i = start; i < end; ++i) {
541 components.col(indices[i]).noalias() += values[i] * multipliers;
    // --- parallel variant: thread t touches only cells within its
    // secondary non-zero boundaries, avoiding write conflicts ---
546 const auto& row_nonzero_bounds = emat.get_secondary_nonzero_boundaries();
548 const auto& starts = row_nonzero_bounds[t];
549 const auto& ends = row_nonzero_bounds[t + 1];
550 auto multipliers = sanisizer::create<Eigen::VectorXd>(rank);
552 for (I<
decltype(ngenes)> g = 0; g < ngenes; ++g) {
553 multipliers.noalias() = scaled_rotation.row(g);
554 const auto start = starts[g], end = ends[g];
555 for (
auto i = start; i < end; ++i) {
556 components.col(indices[i]).noalias() += values[i] * multipliers;
// Project a tatami matrix (genes x cells) onto the scaled rotation without
// realizing it, filling components (rank x cells). Visible fragments show
// a gene-major path accumulating into per-rank local buffers, and a
// cell-major path computing per-cell inner products directly.
563template<
typename Value_,
typename Index_,
class EigenMatrix_>
564void project_matrix_transposed_tatami(
566 EigenMatrix_& components,
567 const EigenMatrix_& scaled_rotation,
570 const auto rank = scaled_rotation.cols();
571 const auto ngenes = mat.
nrow();
572 const auto ncells = mat.
ncol();
573 typedef typename EigenMatrix_::Scalar Scalar;
    // Resize-to (rank, ncells); the surrounding resize call is missing.
577 sanisizer::cast<I<
decltype(components.rows())> >(rank),
578 sanisizer::cast<I<
decltype(components.cols())> >(ncells)
    // --- gene-major path ---
583 static_assert(!EigenMatrix_::IsRowMajor);
584 const auto vptr = scaled_rotation.data();
    // One accumulation buffer per rank, covering this thread's cell range.
587 std::vector<std::vector<Scalar> > local_buffers;
588 local_buffers.reserve(rank);
589 for (I<
decltype(rank)> r = 0; r < rank; ++r) {
    // Sparse per-gene fetch: scatter value * rotation into local buffers.
596 for (Index_ g = 0; g < ngenes; ++g) {
597 const auto range = ext->fetch(vbuffer.data(), ibuffer.data());
598 for (I<
decltype(rank)> r = 0; r < rank; ++r) {
599 const auto mult = vptr[sanisizer::nd_offset<std::size_t>(g, ngenes, r)];
600 auto& local_buffer = local_buffers[r];
601 for (Index_ i = 0; i < range.number; ++i) {
602 local_buffer[range.index[i] - start] += range.value[i] * mult;
    // Dense per-gene fetch.
609 for (Index_ g = 0; g < ngenes; ++g) {
610 const auto ptr = ext->fetch(vbuffer.data());
611 for (I<
decltype(rank)> r = 0; r < rank; ++r) {
612 const auto mult = vptr[sanisizer::nd_offset<std::size_t>(g, ngenes, r)];
613 auto& local_buffer = local_buffers[r];
614 for (Index_ i = 0; i < length; ++i) {
615 local_buffer[i] += ptr[i] * mult;
    // Flush local buffers into the shared output matrix.
621 for (I<
decltype(rank)> r = 0; r < rank; ++r) {
622 for (Index_ c = 0; c < length; ++c) {
623 components.coeffRef(r, c + start) = local_buffers[r][c];
627 }, ncells, nthreads);
    // --- cell-major path ---
631 static_assert(!EigenMatrix_::IsRowMajor);
635 std::vector<Index_> ibuffer(ngenes);
    // Sparse per-cell fetch: dot of the cell's non-zeros with each
    // rotation column.
638 for (Index_ c = start, end = start + length; c < end; ++c) {
639 const auto range = ext->fetch(vbuffer.data(), ibuffer.data());
640 static_assert(!EigenMatrix_::IsRowMajor);
641 for (I<
decltype(rank)> r = 0; r < rank; ++r) {
642 auto& output = components.coeffRef(r, c);
644 const auto rotptr = scaled_rotation.data() + sanisizer::product_unsafe<std::size_t>(r, ngenes);
645 for (Index_ i = 0; i < range.number; ++i) {
646 output += rotptr[range.index[i]] * range.value[i];
    // Dense per-cell fetch: full inner product over genes.
653 for (Index_ c = start, end = start + length; c < end; ++c) {
654 const auto ptr = ext->fetch(vbuffer.data());
655 static_assert(!EigenMatrix_::IsRowMajor);
656 for (I<
decltype(rank)> r = 0; r < rank; ++r) {
657 const auto rotptr = scaled_rotation.data() + sanisizer::product_unsafe<std::size_t>(r, ngenes);
658 components.coeffRef(r, c) = std::inner_product(rotptr, rotptr + ngenes, ptr,
static_cast<Scalar
>(0));
662 }, ncells, nthreads);
// Post-process projected components: center each row (component) to zero
// mean across cells. The remaining handling of D (presumably converting
// singular values into variance explained, given the (cols - 1) sample
// denominator) is missing from this extraction.
666template<
class EigenMatrix_,
class EigenVector_>
667void clean_up_projected(EigenMatrix_& projected, EigenVector_& D) {
670 for (I<
decltype(projected.rows())> i = 0, prows = projected.rows(); i < prows; ++i) {
671 projected.row(i).array() -= projected.row(i).sum() / projected.cols();
675 const typename EigenMatrix_::Scalar denom = projected.cols() - 1;
// irlba workspace computing y = A*x with per-block means subtracted on the
// fly: after the wrapped multiply, each output element is reduced by its
// block's projected mean. The class declaration line itself is missing
// from this extraction (only the constructor onwards is visible).
685template<
class EigenVector_,
class IrlbaMatrix_,
typename Block_,
class CenterMatrix_>
688 ResidualWorkspace(
const IrlbaMatrix_& matrix,
const Block_* block,
const CenterMatrix_& means) :
689 my_work(matrix.new_known_workspace()),
    // my_sub holds one entry per block (means.rows()).
692 my_sub(sanisizer::cast<I<decltype(my_sub.size())> >(my_means.rows()))
696 I<decltype(std::declval<IrlbaMatrix_>().new_known_workspace())> my_work;
697 const Block_* my_block;
698 const CenterMatrix_& my_means;
702 void multiply(
const EigenVector_& right, EigenVector_& output) {
    // Underlying (uncentered) product first.
703 my_work->multiply(right, output);
    // Per-block correction: project the means, subtract per element.
705 my_sub.noalias() = my_means * right;
706 for (I<
decltype(output.size())> i = 0, end = output.size(); i < end; ++i) {
707 auto& val = output.coeffRef(i);
708 val -= my_sub.coeff(my_block[i]);
// Adjoint counterpart of ResidualWorkspace: y = A^T*x with the centering
// correction applied as means^T times the per-block sums of x. The class
// declaration line is missing from this extraction.
713template<
class EigenVector_,
class IrlbaMatrix_,
typename Block_,
class CenterMatrix_>
716 ResidualAdjointWorkspace(
const IrlbaMatrix_& matrix,
const Block_* block,
const CenterMatrix_& means) :
717 my_work(matrix.new_known_adjoint_workspace()),
    // my_aggr holds one accumulator per block.
720 my_aggr(sanisizer::cast<I<decltype(my_aggr.size())> >(my_means.rows()))
724 I<decltype(std::declval<IrlbaMatrix_>().new_known_adjoint_workspace())> my_work;
725 const Block_* my_block;
726 const CenterMatrix_& my_means;
727 EigenVector_ my_aggr;
730 void multiply(
const EigenVector_& right, EigenVector_& output) {
731 my_work->multiply(right, output);
    // Sum the input per block (my_aggr reset presumably in missing lines),
    // then subtract the adjoint-projected means.
734 for (I<
decltype(right.size())> i = 0, end = right.size(); i < end; ++i) {
735 my_aggr.coeffRef(my_block[i]) += right.coeff(i);
738 output.noalias() -= my_means.adjoint() * my_aggr;
// Realization workspace: materializes the wrapped matrix into `buffer` and
// subtracts each row's block mean, yielding the explicit residual matrix.
// The class declaration line is missing from this extraction.
742template<
class EigenMatrix_,
class IrlbaMatrix_,
typename Block_,
class CenterMatrix_>
745 ResidualRealizeWorkspace(
const IrlbaMatrix_& matrix,
const Block_* block,
const CenterMatrix_& means) :
746 my_work(matrix.new_known_realize_workspace()),
752 I<decltype(std::declval<IrlbaMatrix_>().new_known_realize_workspace())> my_work;
753 const Block_* my_block;
754 const CenterMatrix_& my_means;
757 const EigenMatrix_& realize(EigenMatrix_& buffer) {
    // Copy the uncentered matrix, then subtract per-row block means.
758 my_work->realize_copy(buffer);
759 for (I<
decltype(buffer.rows())> i = 0, end = buffer.rows(); i < end; ++i) {
760 buffer.row(i) -= my_means.row(my_block[i]);
// irlba::Matrix adapter that presents the wrapped matrix with per-block
// means implicitly subtracted, delegating the actual work to the three
// Residual*Workspace classes above.
// NOTE(review): the template parameter names below are split mid-token by
// the extraction ("IrlbaMatrixPo"/"inter_" etc.); the bodies consistently
// use IrlbaMatrixPointer_ / CenterMatrixPointer_ — confirm against the
// original header.
768template<
class EigenVector_,
class EigenMatrix_,
class IrlbaMatrixPo
inter_,
class Block_,
class CenterMatrixPo
inter_>
769class ResidualMatrix final :
public irlba::Matrix<EigenVector_, EigenMatrix_> {
771 ResidualMatrix(IrlbaMatrixPointer_ mat,
const Block_* block, CenterMatrixPointer_ means) :
772 my_matrix(std::move(mat)),
774 my_means(std::move(means))
    // Dimensions are those of the wrapped matrix; centering does not
    // change shape.
778 Eigen::Index rows()
const {
779 return my_matrix->rows();
782 Eigen::Index cols()
const {
783 return my_matrix->cols();
787 IrlbaMatrixPointer_ my_matrix;
788 const Block_* my_block;
789 CenterMatrixPointer_ my_means;
    // Type-erased workspace factories simply forward to the known-type
    // factories below.
792 std::unique_ptr<irlba::Workspace<EigenVector_> > new_workspace()
const {
793 return new_known_workspace();
796 std::unique_ptr<irlba::AdjointWorkspace<EigenVector_> > new_adjoint_workspace()
const {
797 return new_known_adjoint_workspace();
800 std::unique_ptr<irlba::RealizeWorkspace<EigenMatrix_> > new_realize_workspace()
const {
801 return new_known_realize_workspace();
805 std::unique_ptr<ResidualWorkspace<EigenVector_,
decltype(*my_matrix), Block_,
decltype(*my_means)> > new_known_workspace()
const {
806 return std::make_unique<ResidualWorkspace<EigenVector_,
decltype(*my_matrix), Block_,
decltype(*my_means)> >(*my_matrix, my_block, *my_means);
809 std::unique_ptr<ResidualAdjointWorkspace<EigenVector_,
decltype(*my_matrix), Block_,
decltype(*my_means)> > new_known_adjoint_workspace()
const {
810 return std::make_unique<ResidualAdjointWorkspace<EigenVector_,
decltype(*my_matrix), Block_,
decltype(*my_means)> >(*my_matrix, my_block, *my_means);
813 std::unique_ptr<ResidualRealizeWorkspace<EigenMatrix_,
decltype(*my_matrix), Block_,
decltype(*my_means)> > new_known_realize_workspace()
const {
814 return std::make_unique<ResidualRealizeWorkspace<EigenMatrix_,
decltype(*my_matrix), Block_,
decltype(*my_means)> >(*my_matrix, my_block, *my_means);
827template<
typename EigenMatrix_,
typename EigenVector_>
// Internal driver for blocked_pca() (the public entry point at internal
// line 924; its signature lines are missing from this extraction). Visible
// flow: size the outputs, pick one of three matrix representations
// (tatami wrapper / realized sparse / realized dense) with a matching
// `projector` callback, wrap with centering/scaling/weighting adapters,
// run IRLBA (missing), project, and optionally re-center the components.
923template<
typename Value_,
typename Index_,
typename Block_,
typename EigenMatrix_,
class EigenVector_>
928 const Index_ ngenes = mat.
nrow(), ncells = mat.
ncol();
929 const auto nblocks = block_details.block_size.size();
    // Size output.center to (nblocks, ngenes) and output.scale to ngenes
    // (the enclosing resize call is missing from this extraction).
931 sanisizer::cast<I<
decltype(output.
center.rows())> >(nblocks),
932 sanisizer::cast<I<
decltype(output.
center.cols())> >(ngenes)
934 sanisizer::resize(output.
scale, ngenes);
    // Polymorphic matrix handle for IRLBA plus a deferred projection step.
936 std::unique_ptr<irlba::Matrix<EigenVector_, EigenMatrix_> > ptr;
937 std::function<void(
const EigenMatrix_&)> projector;
    // Branch 1 (presumably !options.realize_matrix): operate on the tatami
    // matrix directly.
940 ptr.reset(
new TransposedTatamiWrapperMatrix<EigenVector_, EigenMatrix_, Value_, Index_>(mat, options.
num_threads));
941 compute_blockwise_mean_and_variance_tatami(mat, block, block_details, output.
center, output.
scale, options.
num_threads);
943 projector = [&](
const EigenMatrix_& scaled_rotation) ->
void {
947 }
else if (mat.
sparse()) {
    // Branch 2: realize a compressed sparse copy.
966 I<
decltype(extracted.value)>,
967 I<
decltype(extracted.index)>,
968 I<
decltype(extracted.pointers)>
972 std::move(extracted.value),
973 std::move(extracted.index),
974 std::move(extracted.pointers),
978 ptr.reset(sparse_ptr);
980 compute_blockwise_mean_and_variance_realized_sparse(*sparse_ptr, block, block_details, output.
center, output.
scale, options.
num_threads);
    // sparse_ptr captured by value: it outlives this scope inside `ptr`.
983 projector = [&,sparse_ptr](
const EigenMatrix_& scaled_rotation) ->
void {
984 project_matrix_realized_sparse(*sparse_ptr, output.
components, scaled_rotation, options.
num_threads);
    // Branch 3: realize a dense column-major (cells x genes) copy.
989 auto tmp_ptr = std::make_unique<EigenMatrix_>(
990 sanisizer::cast<I<
decltype(std::declval<EigenMatrix_>().rows())> >(ncells),
991 sanisizer::cast<I<
decltype(std::declval<EigenMatrix_>().cols())> >(ngenes)
993 static_assert(!EigenMatrix_::IsRowMajor);
1000 tatami::ConvertToDenseOptions opt;
1001 opt.num_threads = options.num_threads;
1006 compute_blockwise_mean_and_variance_realized_dense(*tmp_ptr, block, block_details, output.
center, output.
scale, options.
num_threads);
    // Keep a raw observer before ownership moves into the irlba wrapper.
1007 const auto dense_ptr = tmp_ptr.get();
1008 ptr.reset(
new irlba::SimpleMatrix<EigenVector_, EigenMatrix_,
decltype(tmp_ptr)>(std::move(tmp_ptr)));
1011 projector = [&,dense_ptr](
const EigenMatrix_& scaled_rotation) ->
void {
1012 output.
components.noalias() = (*dense_ptr * scaled_rotation).adjoint();
    // Wrap with centering / scaling / weighting adapters as requested
    // (the wrapper constructions themselves are missing from this
    // extraction; only the argument fragments remain).
1018 std::unique_ptr<irlba::Matrix<EigenVector_, EigenMatrix_> > alt;
1025 I<
decltype(&(output.
center))>
1034 if (options.
scale) {
1040 I<
decltype(&(output.
scale))>
1051 if (block_details.weighted) {
1057 I<
decltype(&(block_details.expanded_weights))>
1060 &(block_details.expanded_weights),
    // Post-IRLBA: scale rotation, project, then (first variant, presumably
    // components_from_residuals) subtract per-block projected centers.
1071 const auto& scaled_rotation = scale_rotation_matrix(output.
rotation, options.
scale, output.
scale, tmp);
1072 projector(scaled_rotation);
1076 EigenMatrix_ centering = (output.
center * scaled_rotation).adjoint();
1077 for (I<
decltype(ncells)> c =0 ; c < ncells; ++c) {
1078 output.
components.col(c) -= centering.col(block[c]);
    // Second variant: projection without the residual correction.
1099 const auto& scaled_rotation = scale_rotation_matrix(output.
rotation, options.
scale, output.
scale, tmp);
1100 projector(scaled_rotation);
    // Drop the scale vector from the output when scaling was not requested.
1109 if (!options.
scale) {
1110 output.
scale = EigenVector_();
1133template<
typename EigenMatrix_ = Eigen::MatrixXd,
class EigenVector_ = Eigen::VectorXd,
typename Value_,
typename Index_,
typename Block_>
virtual Index_ ncol() const=0
virtual Index_ nrow() const=0
virtual bool prefer_rows() const=0
virtual bool is_sparse() const=0
virtual std::unique_ptr< MyopicSparseExtractor< Value_, Index_ > > sparse(bool row, const Options &opt) const=0
void parallelize(Task_ num_tasks, Run_ run_task)
std::pair< bool, int > compute(const Matrix_ &matrix, const Eigen::Index number, EigenMatrix_ &outU, EigenMatrix_ &outV, EigenVector_ &outD, const Options< EigenVector_ > &options)
double compute_variable_weight(const double s, const VariableWeightParameters ¶ms)
Principal component analysis on single-cell data.
void blocked_pca(const tatami::Matrix< Value_, Index_ > &mat, const Block_ *block, const BlockedPcaOptions &options, BlockedPcaResults< EigenMatrix_, EigenVector_ > &output)
Definition blocked_pca.hpp:924
void parallelize(Function_ fun, const Index_ tasks, const int threads)
CompressedSparseContents< StoredValue_, StoredIndex_, StoredPointer_ > retrieve_compressed_sparse_contents(const Matrix< InputValue_, InputIndex_ > &matrix, const bool row, const RetrieveCompressedSparseContentsOptions &options)
void convert_to_dense(const Matrix< InputValue_, InputIndex_ > &matrix, const bool row_major, StoredValue_ *const store, const ConvertToDenseOptions &options)
I< decltype(std::declval< Container_ >().size())> cast_Index_to_container_size(const Index_ x)
Container_ create_container_of_Index_size(const Index_ x, Args_ &&... args)
auto consecutive_extractor(const Matrix< Value_, Index_ > &matrix, const bool row, const Index_ iter_start, const Index_ iter_length, Args_ &&... args)
Options for blocked_pca().
Definition blocked_pca.hpp:31
scran_blocks::VariableWeightParameters variable_block_weight_parameters
Definition blocked_pca.hpp:79
int num_threads
Definition blocked_pca.hpp:98
bool components_from_residuals
Definition blocked_pca.hpp:86
bool realize_matrix
Definition blocked_pca.hpp:92
irlba::Options< Eigen::VectorXd > irlba_options
Definition blocked_pca.hpp:103
bool transpose
Definition blocked_pca.hpp:62
scran_blocks::WeightPolicy block_weight_policy
Definition blocked_pca.hpp:73
int number
Definition blocked_pca.hpp:48
bool scale
Definition blocked_pca.hpp:56
Results of blocked_pca().
Definition blocked_pca.hpp:828
EigenVector_::Scalar total_variance
Definition blocked_pca.hpp:848
bool converged
Definition blocked_pca.hpp:873
EigenMatrix_ components
Definition blocked_pca.hpp:835
EigenMatrix_ rotation
Definition blocked_pca.hpp:855
EigenVector_ scale
Definition blocked_pca.hpp:868
EigenMatrix_ center
Definition blocked_pca.hpp:862
EigenVector_ variance_explained
Definition blocked_pca.hpp:842