1#ifndef SCRAN_PCA_BLOCKED_PCA_HPP
2#define SCRAN_PCA_BLOCKED_PCA_HPP
16#include "sanisizer/sanisizer.hpp"
// Internal bookkeeping for blockwise weighting in the blocked PCA.
// NOTE(review): this excerpt is truncated (the struct's closing brace and some
// members are not visible here); code left byte-identical, comments only.
112template<
typename Index_,
class EigenVector_>
113struct BlockingDetails {
// Number of cells assigned to each block, indexed by block ID.
114 std::vector<Index_> block_size;
// True when a non-trivial weighting scheme is in effect (set by
// compute_blocking_details for non-NONE weight policies).
116 bool weighted =
false;
117 typedef typename EigenVector_::Scalar Weight;
// Weight applied to each individual cell of a given block (block weight / block size).
120 std::vector<Weight> per_element_weight;
// Sum of the per-block weights accumulated by compute_blocking_details.
121 Weight total_block_weight = 0;
// Per-cell weights expanded to one entry per cell (square-rooted values,
// presumably for symmetric scaling of the matrix — confirm against full source).
122 EigenVector_ expanded_weights;
// Builds the BlockingDetails for a given block assignment vector: tabulates
// block sizes, derives per-block/per-element weights according to the weight
// policy, and expands (square-rooted) weights to one entry per cell.
// NOTE(review): several lines are missing from this excerpt (e.g. the NONE
// early-exit body, the VARIABLE weight computation, the sqrt loop body);
// code left byte-identical, comments only.
125template<
class EigenVector_,
typename Index_,
typename Block_>
126BlockingDetails<Index_, EigenVector_> compute_blocking_details(
132 BlockingDetails<Index_, EigenVector_> output;
// Count how many cells fall into each block.
133 output.block_size = tatami_stats::tabulate_groups(block, ncells);
// With no weighting policy, the remaining fields stay at their defaults
// (the body of this branch is not visible in this excerpt).
134 if (block_weight_policy == scran_blocks::WeightPolicy::NONE) {
138 const auto& block_size = output.block_size;
139 const auto nblocks = block_size.size();
140 output.weighted =
true;
141 auto& total_weight = output.total_block_weight;
142 auto& element_weight = output.per_element_weight;
143 sanisizer::resize(element_weight, nblocks);
// Compute one weight per block, spread evenly over its cells.
145 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
146 const auto bsize = block_size[b];
// EQUAL policy gives every (non-empty) block unit weight; VARIABLE
// presumably rescales it (that computation is missing from this excerpt).
152 typename EigenVector_::Scalar block_weight = 1;
153 if (block_weight_policy == scran_blocks::WeightPolicy::VARIABLE) {
157 element_weight[b] = block_weight / bsize;
158 total_weight += block_weight;
// Fallback branch (presumably for empty blocks): zero element weight.
160 element_weight[b] = 0;
// Guard against an all-zero total weight (handling not visible here).
165 if (total_weight == 0) {
// Square-root the per-element weights before expansion (loop body missing
// from this excerpt — presumably s = sqrt(s); confirm against full source).
170 auto sqrt_weights = element_weight;
171 for (
auto& s : sqrt_weights) {
// Expand to one weight per cell, indexed by that cell's block.
175 auto& expanded = output.expanded_weights;
176 sanisizer::resize(expanded, ncells);
177 for (Index_ c = 0; c < ncells; ++c) {
178 expanded.coeffRef(c) = sqrt_weights[block[c]];
// Computes per-block means ('centers') and a single pooled variance for one
// sparse vector (one gene's non-zero values/indices), accounting for the
// structural zeros of each block.
// NOTE(review): lines are missing from this excerpt (e.g. division of the
// block sums by block size, decrementing block_copy for observed non-zeros);
// code left byte-identical, comments only.
188template<
typename Num_,
typename Value_,
typename Index_,
typename Block_,
typename EigenVector_,
typename Float_>
189void compute_sparse_mean_and_variance_blocked(
190 const Num_ num_nonzero,
191 const Value_* values,
192 const Index_* indices,
194 const BlockingDetails<Index_, EigenVector_>& block_details,
197 std::vector<Index_>& block_copy,
200 const auto& block_size = block_details.block_size;
201 const auto nblocks = block_size.size();
// Accumulate the per-block sums of the non-zero values.
203 std::fill_n(centers, nblocks, 0);
204 for (Num_ i = 0; i < num_nonzero; ++i) {
205 centers[block[indices[i]]] += values[i];
// Convert sums to means (the division is in lines not visible here).
207 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
208 auto bsize = block_size[b];
// block_copy tracks, per block, how many entries remain structural zeros;
// it presumably gets decremented for each observed non-zero in missing lines.
219 std::copy(block_size.begin(), block_size.end(), block_copy.begin());
221 if (block_details.weighted) {
// Weighted sum of squared deviations for the observed non-zeros.
222 for (Num_ i = 0; i < num_nonzero; ++i) {
223 const Block_ curb = block[indices[i]];
224 const auto diff = values[i] - centers[curb];
225 variance += diff * diff * block_details.per_element_weight[curb];
// Contribution of the structural zeros: each deviates from its block mean
// by exactly -centers[b].
228 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
229 const auto val = centers[b];
230 variance += val * val * block_copy[b] * block_details.per_element_weight[b];
// Unweighted equivalents of the two loops above.
233 for (Num_ i = 0; i < num_nonzero; ++i) {
234 const Block_ curb = block[indices[i]];
235 const auto diff = values[i] - centers[curb];
236 variance += diff * diff;
239 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
240 const auto val = centers[b];
241 variance += val * val * block_copy[b];
// Pooled sample variance across all cells.
254 variance /= num_all - 1;
// Fills 'centers' (nblocks x ngenes, column-major) and 'variances' from a
// realized in-memory sparse matrix, parallelizing over genes (columns).
// NOTE(review): this excerpt is truncated (some call arguments and the loop
// tail are not visible); code left byte-identical, comments only.
257template<
class IrlbaSparseMatrix_,
typename Block_,
class Index_,
class EigenVector_,
class EigenMatrix_>
258void compute_blockwise_mean_and_variance_realized_sparse(
259 const IrlbaSparseMatrix_& emat,
261 const BlockingDetails<Index_, EigenVector_>& block_details,
262 EigenMatrix_& centers,
263 EigenVector_& variances,
266 const auto ngenes = emat.cols();
// Each worker handles a contiguous range [start, start + length) of genes.
267 tatami::parallelize([&](
const int,
const I<
decltype(ngenes)> start,
const I<
decltype(ngenes)> length) ->
void {
268 const auto ncells = emat.rows();
// Compressed-sparse components of the realized matrix.
269 const auto& values = emat.get_values();
270 const auto& indices = emat.get_indices();
271 const auto& pointers = emat.get_pointers();
273 const auto nblocks = block_details.block_size.size();
// Column-major layout is assumed so that each gene's block means are
// contiguous at centers.data() + g * nblocks.
274 static_assert(!EigenMatrix_::IsRowMajor);
// Per-thread scratch for the structural-zero counts.
275 auto block_copy = sanisizer::create<std::vector<Index_> >(nblocks);
277 for (I<
decltype(start)> g = start, end = start + length; g < end; ++g) {
// Slice out this gene's non-zero run from the CSC arrays.
278 const auto offset = pointers[g];
279 const auto next_offset = pointers[g + 1];
280 compute_sparse_mean_and_variance_blocked(
281 static_cast<I<decltype(ncells)
> >(next_offset - offset),
282 values.data() + offset,
283 indices.data() + offset,
286 centers.data() + sanisizer::product_unsafe<std::size_t>(g, nblocks),
292 }, ngenes, nthreads);
// Dense counterpart of compute_sparse_mean_and_variance_blocked: computes
// per-block means and a pooled variance for one gene's dense value array.
// NOTE(review): truncated excerpt (mean division and some parameters are not
// visible); code left byte-identical, comments only.
295template<
typename Num_,
typename Value_,
typename Block_,
typename Index_,
typename EigenVector_,
typename Float_>
296void compute_dense_mean_and_variance_blocked(
298 const Value_* values,
300 const BlockingDetails<Index_, EigenVector_>& block_details,
304 const auto& block_size = block_details.block_size;
305 const auto nblocks = block_size.size();
// Sum each cell's value into its block's accumulator.
306 std::fill_n(centers, nblocks, 0);
307 for (Num_ i = 0; i < number; ++i) {
308 centers[block[i]] += values[i];
// Convert sums to means (the division is in lines not visible here).
310 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
311 const auto& bsize = block_size[b];
// Sum of squared deviations from the block mean, optionally weighted.
319 if (block_details.weighted) {
320 for (Num_ i = 0; i < number; ++i) {
321 const auto curb = block[i];
322 const auto delta = values[i] - centers[curb];
323 variance += delta * delta * block_details.per_element_weight[curb];
326 for (Num_ i = 0; i < number; ++i) {
327 const auto curb = block[i];
328 const auto delta = values[i] - centers[curb];
329 variance += delta * delta;
// Pooled sample variance over all cells.
333 variance /= number - 1;
// Fills 'centers' and 'variances' from a realized dense (cells x genes,
// column-major) matrix, parallelizing over genes.
// NOTE(review): truncated excerpt (some call arguments are not visible);
// code left byte-identical, comments only.
336template<
class EigenMatrix_,
typename Block_,
class Index_,
class EigenVector_>
337void compute_blockwise_mean_and_variance_realized_dense(
338 const EigenMatrix_& emat,
340 const BlockingDetails<Index_, EigenVector_>& block_details,
341 EigenMatrix_& centers,
342 EigenVector_& variances,
345 const auto ngenes = emat.cols();
// Each worker handles a contiguous range [start, start + length) of genes.
346 tatami::parallelize([&](
const int,
const I<
decltype(ngenes)> start,
const I<
decltype(ngenes)> length) ->
void {
347 const auto ncells = emat.rows();
// Column-major layout lets us address gene g's column as a flat pointer.
348 static_assert(!EigenMatrix_::IsRowMajor);
349 const auto nblocks = block_details.block_size.size();
350 for (I<
decltype(start)> g = start, end = start + length; g < end; ++g) {
351 compute_dense_mean_and_variance_blocked(
353 emat.data() + sanisizer::product_unsafe<std::size_t>(g, ncells),
356 centers.data() + sanisizer::product_unsafe<std::size_t>(g, nblocks),
360 }, ngenes, nthreads);
// Computes blockwise centers and per-gene variances directly from a tatami
// matrix (no realization). Dispatches between gene-major extraction (calling
// the per-gene helpers above) and cell-major running accumulation, then
// combines per-block statistics using (bsize - 1) [* weight] multipliers.
// NOTE(review): heavily truncated excerpt — the branch conditions, extractor
// setup and several call arguments are not visible; code left byte-identical,
// comments only.
363template<
typename Value_,
typename Index_,
typename Block_,
class EigenMatrix_,
class EigenVector_>
364void compute_blockwise_mean_and_variance_tatami(
367 const BlockingDetails<Index_, EigenVector_>& block_details,
368 EigenMatrix_& centers,
369 EigenVector_& variances,
372 const auto& block_size = block_details.block_size;
373 const auto nblocks = block_size.size();
374 const Index_ ngenes = mat.
nrow();
375 const Index_ ncells = mat.
ncol();
// Gene-major path: centers are written column-by-column, so the matrix must
// be column-major.
379 static_assert(!EigenMatrix_::IsRowMajor);
380 auto block_copy = sanisizer::create<std::vector<Index_> >(nblocks);
// Sparse row extraction: one fetch per gene, delegating to the sparse helper.
386 for (Index_ g = start, end = start + length; g < end; ++g) {
387 auto range = ext->fetch(vbuffer.data(), ibuffer.data());
388 compute_sparse_mean_and_variance_blocked(
394 centers.data() + sanisizer::product_unsafe<std::size_t>(g, nblocks),
// Dense row extraction: same idea with the dense helper.
403 for (Index_ g = start, end = start + length; g < end; ++g) {
404 auto ptr = ext->fetch(vbuffer.data());
405 compute_dense_mean_and_variance_blocked(
410 centers.data() + sanisizer::product_unsafe<std::size_t>(g, nblocks),
415 }, ngenes, nthreads);
// Cell-major path: accumulate per-block running means/variances, then merge.
418 typedef typename EigenVector_::Scalar Scalar;
// Per-block multiplier for combining within-block variances into the pooled
// variance: (bsize - 1), additionally scaled by the element weight if any.
419 std::vector<std::pair<I<
decltype(nblocks)>, Scalar> > block_multipliers;
420 block_multipliers.reserve(nblocks);
422 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
423 const auto bsize = block_size[b];
425 Scalar mult = bsize - 1;
426 if (block_details.weighted) {
427 mult *= block_details.per_element_weight[b];
429 block_multipliers.emplace_back(b, mult);
// Per-thread buffers for each block's running center/variance over the
// gene range [start, start + length).
434 std::vector<std::vector<Scalar> > re_centers, re_variances;
435 re_centers.reserve(nblocks);
436 re_variances.reserve(nblocks);
437 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
438 re_centers.emplace_back(length);
439 re_variances.emplace_back(length);
// Sparse running statistics: one accumulator per block; each fetched cell
// is routed to its block's accumulator.
445 std::vector<tatami_stats::variances::RunningSparse<Scalar, Value_, Index_> > running;
446 running.reserve(nblocks);
447 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
448 running.emplace_back(length, re_centers[b].data(), re_variances[b].data(),
false, start);
453 for (Index_ c = 0; c < ncells; ++c) {
454 const auto range = ext->fetch(vbuffer.data(), ibuffer.data());
455 running[block[c]].add(range.value, range.index, range.number);
// Finalization loop (body not visible in this excerpt).
458 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
// Dense running statistics: same structure as the sparse case.
463 std::vector<tatami_stats::variances::RunningDense<Scalar, Value_, Index_> > running;
464 running.reserve(nblocks);
465 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
466 running.emplace_back(length, re_centers[b].data(), re_variances[b].data(),
false);
470 for (Index_ c = 0; c < ncells; ++c) {
471 auto ptr = ext->fetch(vbuffer.data());
472 running[block[c]].add(ptr);
// Finalization loop (body not visible in this excerpt).
475 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
// Scatter the per-block results back into the global outputs.
480 static_assert(!EigenMatrix_::IsRowMajor);
481 for (Index_ i = 0; i < length; ++i) {
482 auto mptr = centers.data() + sanisizer::product_unsafe<std::size_t>(start + i, nblocks);
483 for (I<
decltype(nblocks)> b = 0; b < nblocks; ++b) {
484 mptr[b] = re_centers[b][i];
// Pool the per-block variances with the precomputed multipliers.
487 auto& my_var = variances[start + i];
489 for (
const auto& bm : block_multipliers) {
490 my_var += re_variances[bm.first][i] * bm.second;
492 my_var /= ncells - 1;
494 }, ngenes, nthreads);
// Returns the rotation matrix, with each row divided by the corresponding
// scaling factor when scaling was requested; 'tmp' holds the scaled copy so
// the return can be a reference either way.
// NOTE(review): the branch on 'scale' and both return statements are missing
// from this excerpt; code left byte-identical, comments only.
502template<
class EigenMatrix_,
class EigenVector_>
503const EigenMatrix_& scale_rotation_matrix(
const EigenMatrix_& rotation,
bool scale,
const EigenVector_& scale_v, EigenMatrix_& tmp) {
// Element-wise division of each rotation column entry by scale_v.
505 tmp = (rotation.array().colwise() / scale_v.array()).matrix();
// Projects a realized sparse matrix onto the scaled rotation vectors:
// components (rank x ncells) += scaled_rotation^T applied gene-by-gene,
// scattering each gene's non-zero contribution into the owning cells.
// Two paths are visible: a serial CSC sweep and a per-thread sweep using
// precomputed secondary non-zero boundaries.
// NOTE(review): truncated excerpt — the thread dispatch and the resize call
// around lines 522-527 are partially missing; code left byte-identical.
512template<
class IrlbaSparseMatrix_,
class EigenMatrix_>
513inline void project_matrix_realized_sparse(
514 const IrlbaSparseMatrix_& emat,
515 EigenMatrix_& components,
516 const EigenMatrix_& scaled_rotation,
519 const auto rank = scaled_rotation.cols();
520 const auto ncells = emat.rows();
521 const auto ngenes = emat.cols();
// Output is rank x ncells; casts guard against index-type overflow.
525 sanisizer::cast<I<
decltype(components.rows())> >(rank),
526 sanisizer::cast<I<
decltype(components.cols())> >(ncells)
528 components.setZero();
530 const auto& values = emat.get_values();
531 const auto& indices = emat.get_indices();
// Serial path: walk the CSC pointers gene by gene.
534 const auto& pointers = emat.get_pointers();
535 auto multipliers = sanisizer::create<Eigen::VectorXd>(rank);
536 for (I<
decltype(ngenes)> g = 0; g < ngenes; ++g) {
// Cache this gene's rotation row, then scatter value * row into each cell.
537 multipliers.noalias() = scaled_rotation.row(g);
538 const auto start = pointers[g], end = pointers[g + 1];
539 for (
auto i = start; i < end; ++i) {
540 components.col(indices[i]).noalias() += values[i] * multipliers;
// Parallel path: each thread t handles the non-zeros bounded by the
// precomputed secondary boundaries, so no two threads touch the same cell.
545 const auto& row_nonzero_bounds = emat.get_secondary_nonzero_boundaries();
547 const auto& starts = row_nonzero_bounds[t];
548 const auto& ends = row_nonzero_bounds[t + 1];
549 auto multipliers = sanisizer::create<Eigen::VectorXd>(rank);
551 for (I<
decltype(ngenes)> g = 0; g < ngenes; ++g) {
552 multipliers.noalias() = scaled_rotation.row(g);
553 const auto start = starts[g], end = ends[g];
554 for (
auto i = start; i < end; ++i) {
555 components.col(indices[i]).noalias() += values[i] * multipliers;
// Projects a tatami matrix (genes x cells) onto the scaled rotation vectors
// without realizing it, producing components (rank x ncells). Four visible
// paths: gene-major sparse/dense accumulation into per-rank local buffers,
// and cell-major sparse/dense per-cell dot products.
// NOTE(review): truncated excerpt — extractor construction, branch conditions
// and buffer setup are partially missing; code left byte-identical.
562template<
typename Value_,
typename Index_,
class EigenMatrix_>
563void project_matrix_transposed_tatami(
565 EigenMatrix_& components,
566 const EigenMatrix_& scaled_rotation,
569 const auto rank = scaled_rotation.cols();
570 const auto ngenes = mat.
nrow();
571 const auto ncells = mat.
ncol();
572 typedef typename EigenMatrix_::Scalar Scalar;
// Output is rank x ncells; casts guard against index-type overflow.
576 sanisizer::cast<I<
decltype(components.rows())> >(rank),
577 sanisizer::cast<I<
decltype(components.cols())> >(ncells)
// Gene-major path: rotation is addressed as a flat column-major array.
582 static_assert(!EigenMatrix_::IsRowMajor);
583 const auto vptr = scaled_rotation.data();
// One local accumulation buffer per rank, covering this thread's cell range.
586 std::vector<std::vector<Scalar> > local_buffers;
587 local_buffers.reserve(rank);
588 for (I<
decltype(rank)> r = 0; r < rank; ++r) {
// Sparse: scatter each gene's non-zeros into the rank buffers.
595 for (Index_ g = 0; g < ngenes; ++g) {
596 const auto range = ext->fetch(vbuffer.data(), ibuffer.data());
597 for (I<
decltype(rank)> r = 0; r < rank; ++r) {
598 const auto mult = vptr[sanisizer::nd_offset<std::size_t>(g, ngenes, r)];
599 auto& local_buffer = local_buffers[r];
600 for (Index_ i = 0; i < range.number; ++i) {
601 local_buffer[range.index[i] - start] += range.value[i] * mult;
// Dense: same accumulation over the full cell range.
608 for (Index_ g = 0; g < ngenes; ++g) {
609 const auto ptr = ext->fetch(vbuffer.data());
610 for (I<
decltype(rank)> r = 0; r < rank; ++r) {
611 const auto mult = vptr[sanisizer::nd_offset<std::size_t>(g, ngenes, r)];
612 auto& local_buffer = local_buffers[r];
613 for (Index_ i = 0; i < length; ++i) {
614 local_buffer[i] += ptr[i] * mult;
// Copy the thread-local buffers into the shared output.
620 for (I<
decltype(rank)> r = 0; r < rank; ++r) {
621 for (Index_ c = 0; c < length; ++c) {
622 components.coeffRef(r, c + start) = local_buffers[r][c];
626 }, ncells, nthreads);
// Cell-major path: each fetched cell yields one dot product per rank.
630 static_assert(!EigenMatrix_::IsRowMajor);
634 std::vector<Index_> ibuffer(ngenes);
// Sparse: accumulate rotation entries at the non-zero gene indices.
637 for (Index_ c = start, end = start + length; c < end; ++c) {
638 const auto range = ext->fetch(vbuffer.data(), ibuffer.data());
639 static_assert(!EigenMatrix_::IsRowMajor);
640 for (I<
decltype(rank)> r = 0; r < rank; ++r) {
641 auto& output = components.coeffRef(r, c);
643 const auto rotptr = scaled_rotation.data() + sanisizer::product_unsafe<std::size_t>(r, ngenes);
644 for (Index_ i = 0; i < range.number; ++i) {
645 output += rotptr[range.index[i]] * range.value[i];
// Dense: full-length inner product per rank.
652 for (Index_ c = start, end = start + length; c < end; ++c) {
653 const auto ptr = ext->fetch(vbuffer.data());
654 static_assert(!EigenMatrix_::IsRowMajor);
655 for (I<
decltype(rank)> r = 0; r < rank; ++r) {
656 const auto rotptr = scaled_rotation.data() + sanisizer::product_unsafe<std::size_t>(r, ngenes);
657 components.coeffRef(r, c) = std::inner_product(rotptr, rotptr + ngenes, ptr,
static_cast<Scalar
>(0));
661 }, ncells, nthreads);
// Post-processes the projected components: centers each component (row) to a
// zero mean across cells; 'denom' suggests a subsequent variance computation
// into D, but those lines are not visible in this excerpt.
// Code left byte-identical, comments only.
665template<
class EigenMatrix_,
class EigenVector_>
666void clean_up_projected(EigenMatrix_& projected, EigenVector_& D) {
// Subtract each row's mean so every component is centered across cells.
669 for (I<
decltype(projected.rows())> i = 0, prows = projected.rows(); i < prows; ++i) {
670 projected.row(i).array() -= projected.row(i).sum() / projected.cols();
// Sample-variance denominator (ncells - 1); usage not visible here.
674 const typename EigenMatrix_::Scalar denom = projected.cols() - 1;
// Workspace that multiplies by the block-centered matrix: delegates to the
// wrapped matrix's workspace, then subtracts each block's (means * right)
// contribution from the corresponding output entries.
// NOTE(review): the class declaration line and some member initializers are
// missing from this excerpt; code left byte-identical, comments only.
684template<
class EigenVector_,
class IrlbaMatrix_,
typename Block_,
class CenterMatrix_>
687 ResidualWorkspace(
const IrlbaMatrix_& matrix,
const Block_* block,
const CenterMatrix_& means) :
688 my_work(matrix.new_known_workspace()),
// Scratch vector with one entry per block (rows of the center matrix).
691 my_sub(sanisizer::cast<I<decltype(my_sub.size())> >(my_means.rows()))
// Underlying matrix's workspace, plus non-owning views of the block
// assignments and the per-block center matrix.
695 I<decltype(std::declval<IrlbaMatrix_>().new_known_workspace())> my_work;
696 const Block_* my_block;
697 const CenterMatrix_& my_means;
701 void multiply(
const EigenVector_& right, EigenVector_& output) {
// output = X * right, uncorrected.
702 my_work->multiply(right, output);
// Per-block correction: subtract (means * right)[block of cell i].
704 my_sub.noalias() = my_means * right;
705 for (I<
decltype(output.size())> i = 0, end = output.size(); i < end; ++i) {
706 auto& val = output.coeffRef(i);
707 val -= my_sub.coeff(my_block[i]);
// Adjoint counterpart of ResidualWorkspace: after the wrapped adjoint
// multiply, aggregates 'right' per block and subtracts means^T * aggregate.
// NOTE(review): the class declaration line and the my_aggr zeroing are
// missing from this excerpt; code left byte-identical, comments only.
712template<
class EigenVector_,
class IrlbaMatrix_,
typename Block_,
class CenterMatrix_>
715 ResidualAdjointWorkspace(
const IrlbaMatrix_& matrix,
const Block_* block,
const CenterMatrix_& means) :
716 my_work(matrix.new_known_adjoint_workspace()),
// Scratch vector with one entry per block (rows of the center matrix).
719 my_aggr(sanisizer::cast<I<decltype(my_aggr.size())> >(my_means.rows()))
723 I<decltype(std::declval<IrlbaMatrix_>().new_known_adjoint_workspace())> my_work;
724 const Block_* my_block;
725 const CenterMatrix_& my_means;
726 EigenVector_ my_aggr;
729 void multiply(
const EigenVector_& right, EigenVector_& output) {
// output = X^T * right, uncorrected.
730 my_work->multiply(right, output);
// Sum the entries of 'right' within each block (my_aggr presumably reset
// to zero in a line not visible here).
733 for (I<
decltype(right.size())> i = 0, end = right.size(); i < end; ++i) {
734 my_aggr.coeffRef(my_block[i]) += right.coeff(i);
// Apply the centering correction in the adjoint direction.
737 output.noalias() -= my_means.adjoint() * my_aggr;
// Realization workspace: materializes the wrapped matrix into 'buffer' and
// subtracts each row's block-specific center, yielding the residual matrix.
// NOTE(review): the class declaration line and the return statement are
// missing from this excerpt; code left byte-identical, comments only.
741template<
class EigenMatrix_,
class IrlbaMatrix_,
typename Block_,
class CenterMatrix_>
744 ResidualRealizeWorkspace(
const IrlbaMatrix_& matrix,
const Block_* block,
const CenterMatrix_& means) :
745 my_work(matrix.new_known_realize_workspace()),
751 I<decltype(std::declval<IrlbaMatrix_>().new_known_realize_workspace())> my_work;
752 const Block_* my_block;
753 const CenterMatrix_& my_means;
756 const EigenMatrix_& realize(EigenMatrix_& buffer) {
// Materialize the uncorrected matrix, then center each row by its block.
757 my_work->realize_copy(buffer);
758 for (I<
decltype(buffer.rows())> i = 0, end = buffer.rows(); i < end; ++i) {
759 buffer.row(i) -= my_means.row(my_block[i]);
// irlba::Matrix adaptor representing (X - per-block centers) without ever
// materializing the residuals: all work is deferred to the Residual*Workspace
// classes above, which apply the centering on the fly.
// NOTE(review): truncated excerpt — the template parameter names are split
// mid-token ("IrlbaMatrixPo"/"inter_") by the extraction, and access
// specifiers plus some members are missing; code left byte-identical.
767template<
class EigenVector_,
class EigenMatrix_,
class IrlbaMatrixPo
inter_,
class Block_,
class CenterMatrixPo
inter_>
768class ResidualMatrix final :
public irlba::Matrix<EigenVector_, EigenMatrix_> {
// Takes ownership of the wrapped matrix and center-matrix pointers; the
// block assignment pointer is borrowed and must outlive this object.
770 ResidualMatrix(IrlbaMatrixPointer_ mat,
const Block_* block, CenterMatrixPointer_ means) :
771 my_matrix(std::move(mat)),
773 my_means(std::move(means))
// Dimensions are those of the wrapped matrix; centering does not change them.
777 Eigen::Index rows()
const {
778 return my_matrix->rows();
781 Eigen::Index cols()
const {
782 return my_matrix->cols();
786 IrlbaMatrixPointer_ my_matrix;
787 const Block_* my_block;
788 CenterMatrixPointer_ my_means;
// Type-erased workspace factories required by the irlba::Matrix interface;
// each simply forwards to the statically-typed "known" factory below.
791 std::unique_ptr<irlba::Workspace<EigenVector_> > new_workspace()
const {
792 return new_known_workspace();
795 std::unique_ptr<irlba::AdjointWorkspace<EigenVector_> > new_adjoint_workspace()
const {
796 return new_known_adjoint_workspace();
799 std::unique_ptr<irlba::RealizeWorkspace<EigenMatrix_> > new_realize_workspace()
const {
800 return new_known_realize_workspace();
// Statically-typed factories, handing the workspaces the wrapped matrix,
// block assignments and center matrix they need for on-the-fly centering.
804 std::unique_ptr<ResidualWorkspace<EigenVector_,
decltype(*my_matrix), Block_,
decltype(*my_means)> > new_known_workspace()
const {
805 return std::make_unique<ResidualWorkspace<EigenVector_,
decltype(*my_matrix), Block_,
decltype(*my_means)> >(*my_matrix, my_block, *my_means);
808 std::unique_ptr<ResidualAdjointWorkspace<EigenVector_,
decltype(*my_matrix), Block_,
decltype(*my_means)> > new_known_adjoint_workspace()
const {
809 return std::make_unique<ResidualAdjointWorkspace<EigenVector_,
decltype(*my_matrix), Block_,
decltype(*my_means)> >(*my_matrix, my_block, *my_means);
812 std::unique_ptr<ResidualRealizeWorkspace<EigenMatrix_,
decltype(*my_matrix), Block_,
decltype(*my_means)> > new_known_realize_workspace()
const {
813 return std::make_unique<ResidualRealizeWorkspace<EigenMatrix_,
decltype(*my_matrix), Block_,
decltype(*my_means)> >(*my_matrix, my_block, *my_means);
826template<
typename EigenMatrix_,
typename EigenVector_>
// Main driver for the blocked PCA. Selects a representation for the input
// matrix (tatami wrapper, realized sparse, or realized dense), computes the
// blockwise centers/scales, runs IRLBA on the residual/weighted matrix, and
// projects cells onto the rotation to produce the components.
// NOTE(review): very heavily truncated excerpt — the IRLBA invocation, the
// residual/weighting wrapper construction, and many branch bodies are not
// visible; code left byte-identical, comments only.
880template<
typename Value_,
typename Index_,
typename Block_,
typename EigenMatrix_,
class EigenVector_,
class SubsetFunction_>
881void blocked_pca_internal(
886 SubsetFunction_ subset_fun
891 const Index_ ngenes = mat.
nrow(), ncells = mat.
ncol();
892 const auto nblocks = block_details.block_size.size();
// Size the outputs: center is nblocks x ngenes, scale has one entry per gene.
894 sanisizer::cast<I<
decltype(output.
center.rows())> >(nblocks),
895 sanisizer::cast<I<
decltype(output.
center.cols())> >(ngenes)
897 sanisizer::resize(output.
scale, ngenes);
// 'ptr' holds the matrix handed to IRLBA; 'projector' maps the final scaled
// rotation back to per-cell components for whichever representation is used.
899 std::unique_ptr<irlba::Matrix<EigenVector_, EigenMatrix_> > ptr;
900 std::function<void(
const EigenMatrix_&)> projector;
// Path 1: operate on the tatami matrix directly (no realization).
903 ptr.reset(
new TransposedTatamiWrapperMatrix<EigenVector_, EigenMatrix_, Value_, Index_>(mat, options.
num_threads));
904 compute_blockwise_mean_and_variance_tatami(mat, block, block_details, output.
center, output.
scale, options.
num_threads);
906 projector = [&](
const EigenMatrix_& scaled_rotation) ->
void {
910 }
else if (mat.
sparse()) {
// Path 2: realize into an in-memory compressed sparse representation.
929 I<
decltype(extracted.value)>,
930 I<
decltype(extracted.index)>,
931 I<
decltype(extracted.pointers)>
935 std::move(extracted.value),
936 std::move(extracted.index),
937 std::move(extracted.pointers),
941 ptr.reset(sparse_ptr);
943 compute_blockwise_mean_and_variance_realized_sparse(*sparse_ptr, block, block_details, output.
center, output.
scale, options.
num_threads);
// Capture the raw pointer so projection can reuse the realized matrix.
946 projector = [&,sparse_ptr](
const EigenMatrix_& scaled_rotation) ->
void {
947 project_matrix_realized_sparse(*sparse_ptr, output.
components, scaled_rotation, options.
num_threads);
// Path 3: realize into a dense column-major ncells x ngenes matrix.
952 auto tmp_ptr = std::make_unique<EigenMatrix_>(
953 sanisizer::cast<I<
decltype(std::declval<EigenMatrix_>().rows())> >(ncells),
954 sanisizer::cast<I<
decltype(std::declval<EigenMatrix_>().cols())> >(ngenes)
956 static_assert(!EigenMatrix_::IsRowMajor);
963 tatami::ConvertToDenseOptions opt;
964 opt.num_threads = options.num_threads;
969 compute_blockwise_mean_and_variance_realized_dense(*tmp_ptr, block, block_details, output.
center, output.
scale, options.
num_threads);
// Keep a raw alias before ownership moves into the irlba wrapper.
970 const auto dense_ptr = tmp_ptr.get();
971 ptr.reset(
new irlba::SimpleMatrix<EigenVector_, EigenMatrix_,
decltype(tmp_ptr)>(std::move(tmp_ptr)));
// Dense projection is a single matrix product.
974 projector = [&,dense_ptr](
const EigenMatrix_& scaled_rotation) ->
void {
975 output.
components.noalias() = (*dense_ptr * scaled_rotation).adjoint();
// Wrap 'ptr' with centering/scaling (and weighting, if enabled) adaptors
// before the IRLBA run; the wrapper construction lines are mostly missing.
981 std::unique_ptr<irlba::Matrix<EigenVector_, EigenMatrix_> > alt;
988 I<
decltype(&(output.
center))>
1003 I<
decltype(&(output.
scale))>
1014 if (block_details.weighted) {
1020 I<
decltype(&(block_details.expanded_weights))>
1023 &(block_details.expanded_weights),
// Post-IRLBA: scale the rotation (if requested), project the cells, and
// (in this branch) subtract the per-block centering from the components.
1036 const auto& scaled_rotation = scale_rotation_matrix(output.
rotation, options.
scale, output.
scale, tmp);
1037 projector(scaled_rotation);
1041 EigenMatrix_ centering = (output.
center * scaled_rotation).adjoint();
1042 for (I<
decltype(ncells)> c =0 ; c < ncells; ++c) {
1043 output.
components.col(c) -= centering.col(block[c]);
// Alternate branch (residuals already handled upstream): project only.
1066 const auto& scaled_rotation = scale_rotation_matrix(output.
rotation, options.
scale, output.
scale, tmp);
1067 projector(scaled_rotation);
// Clear the scale vector when scaling was not requested, so callers see an
// empty vector rather than stale values.
1076 if (!options.
scale) {
1077 output.
scale = EigenVector_();
// Public entry point: forwards to blocked_pca_internal with a no-op subset
// callback (no gene subsetting in this overload).
// NOTE(review): the function name, most parameters and forwarded arguments
// are missing from this excerpt; code left byte-identical, comments only.
1133template<
typename Value_,
typename Index_,
typename Block_,
typename EigenMatrix_,
class EigenVector_>
1136 const Block_* block,
1140 blocked_pca_internal<Value_, Index_, Block_, EigenMatrix_, EigenVector_>(
// No-op subset hook: this overload performs no gene subsetting.
1145 [&](
const BlockingDetails<Index_, EigenVector_>&,
const EigenMatrix_&,
const EigenVector_&) ->
void {}
1168template<
typename EigenMatrix_ = Eigen::MatrixXd,
class EigenVector_ = Eigen::VectorXd,
typename Value_,
typename Index_,
typename Block_>
virtual Index_ ncol() const=0
virtual Index_ nrow() const=0
virtual bool prefer_rows() const=0
virtual bool is_sparse() const=0
virtual std::unique_ptr< MyopicSparseExtractor< Value_, Index_ > > sparse(bool row, const Options &opt) const=0
void parallelize(Task_ num_tasks, Run_ run_task)
std::pair< bool, int > compute(const Matrix_ &matrix, const Eigen::Index number, EigenMatrix_ &outU, EigenMatrix_ &outV, EigenVector_ &outD, const Options< EigenVector_ > &options)
double compute_variable_weight(const double s, const VariableWeightParameters ¶ms)
Principal component analysis on single-cell data.
void blocked_pca(const tatami::Matrix< Value_, Index_ > &mat, const Block_ *block, const BlockedPcaOptions &options, BlockedPcaResults< EigenMatrix_, EigenVector_ > &output)
Definition blocked_pca.hpp:1134
void parallelize(Function_ fun, const Index_ tasks, const int threads)
CompressedSparseContents< StoredValue_, StoredIndex_, StoredPointer_ > retrieve_compressed_sparse_contents(const Matrix< InputValue_, InputIndex_ > &matrix, const bool row, const RetrieveCompressedSparseContentsOptions &options)
void convert_to_dense(const Matrix< InputValue_, InputIndex_ > &matrix, const bool row_major, StoredValue_ *const store, const ConvertToDenseOptions &options)
I< decltype(std::declval< Container_ >().size())> cast_Index_to_container_size(const Index_ x)
Container_ create_container_of_Index_size(const Index_ x, Args_ &&... args)
auto consecutive_extractor(const Matrix< Value_, Index_ > &matrix, const bool row, const Index_ iter_start, const Index_ iter_length, Args_ &&... args)
Options for blocked_pca().
Definition blocked_pca.hpp:30
scran_blocks::VariableWeightParameters variable_block_weight_parameters
Definition blocked_pca.hpp:78
int num_threads
Definition blocked_pca.hpp:97
bool components_from_residuals
Definition blocked_pca.hpp:85
bool realize_matrix
Definition blocked_pca.hpp:91
irlba::Options< Eigen::VectorXd > irlba_options
Definition blocked_pca.hpp:102
bool transpose
Definition blocked_pca.hpp:61
scran_blocks::WeightPolicy block_weight_policy
Definition blocked_pca.hpp:72
int number
Definition blocked_pca.hpp:47
bool scale
Definition blocked_pca.hpp:55
Results of blocked_pca().
Definition blocked_pca.hpp:827
EigenVector_::Scalar total_variance
Definition blocked_pca.hpp:849
bool converged
Definition blocked_pca.hpp:874
EigenMatrix_ components
Definition blocked_pca.hpp:836
EigenMatrix_ rotation
Definition blocked_pca.hpp:856
EigenVector_ scale
Definition blocked_pca.hpp:869
EigenMatrix_ center
Definition blocked_pca.hpp:863
EigenVector_ variance_explained
Definition blocked_pca.hpp:843