// Overload taking one pointer per batch; it simply forwards to the internal implementation.
// (Signature as given for the definition at mnncorrect.hpp:142.)
void compute(std::size_t num_dim, const std::vector<Index_>& num_obs, const std::vector<const Float_*>& batches, Float_* output, const Options<Index_, Float_, Matrix_>& options) {
    internal::compute(num_dim, num_obs, batches, output, options);
}
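
// A minimal usage sketch for the overload above, not part of mnncorrect.hpp itself.
// It assumes an enclosing mnncorrect namespace, that <vector> is included, and that
// each observation occupies num_dim consecutive values in its batch's array; the
// function name and the Options_ placeholder are illustrative only.
template<typename Options_>
void example_separate_batches(std::size_t num_dim, const std::vector<std::vector<double> >& batch_data, double* output, const Options_& options) {
    std::vector<int> num_obs;
    std::vector<const double*> batches;
    for (const auto& b : batch_data) {
        num_obs.push_back(static_cast<int>(b.size() / num_dim)); // observations in this batch
        batches.push_back(b.data());
    }
    mnncorrect::compute(num_dim, num_obs, batches, output, options); // 'output' holds num_dim values per observation
}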

// Body of the overload that takes a single contiguous input array plus per-batch sizes:
// slice the array into per-batch pointers, then dispatch to the overload above.
std::vector<const Float_*> batches;
batches.reserve(num_obs.size());
for (auto n : num_obs) {
    batches.push_back(input);
    input += static_cast<std::size_t>(n) * num_dim; // advance past this batch's observations
}
compute(num_dim, num_obs, batches, output, options);
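
// A minimal usage sketch for the contiguous overload (hypothetical caller, assuming an
// enclosing mnncorrect namespace): all batches are stored back-to-back in one array,
// each observation occupying num_dim consecutive values, so only the sizes are needed.
// The sizes below are placeholders.
template<typename Options_>
void example_contiguous(std::size_t num_dim, const double* input, double* output, const Options_& options) {
    std::vector<int> num_obs{ 100, 250, 80 }; // three batches, stored consecutively in 'input'
    mnncorrect::compute(num_dim, num_obs, input, output, options);
}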

// Body of the overload that takes a per-observation batch assignment array:
// first count how many observations belong to each batch.
const BatchIndex nbatches = (num_obs ? static_cast<BatchIndex>(*std::max_element(batch, batch + num_obs)) + 1 : 0);
std::vector<Index_> sizes(nbatches);
for (Index_ o = 0; o < num_obs; ++o) {
    ++sizes[batch[o]];
}

// If observations are already grouped by batch, dispatch directly and skip the reordering below.
bool already_sorted = true;
for (Index_ o = 1; o < num_obs; ++o) {
    if (batch[o] < batch[o-1]) {
        already_sorted = false;
        break;
    }
}
if (already_sorted) {
    compute(num_dim, sizes, input, output, options);
    return;
}

// Compute the starting offset of each batch within the reordered buffer.
std::size_t accumulated = 0;
std::vector<std::size_t> offsets(nbatches);
for (BatchIndex b = 0; b < nbatches; ++b) {
    offsets[b] = accumulated;
    accumulated += sizes[b];
}

// Allocate a temporary buffer and point each batch at its contiguous slice of it.
std::vector<Float_> tmp(num_dim * static_cast<std::size_t>(num_obs));
std::vector<const Float_*> ptrs(nbatches);
for (BatchIndex b = 0; b < nbatches; ++b) {
    ptrs[b] = tmp.data() + offsets[b] * num_dim;
}

// Scatter each observation into its batch's slice, bumping that batch's offset as we go.
for (Index_ o = 0; o < num_obs; ++o) {
    auto current = input + static_cast<std::size_t>(o) * num_dim;
    auto& offset = offsets[batch[o]];
    auto destination = tmp.data() + num_dim * offset;
    std::copy_n(current, num_dim, destination);
    ++offset;
}
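
// Worked illustration of the reordering above (added for clarity, not in the original
// source): with batch = {1, 0, 0, 1}, we get sizes = {2, 2} and offsets = {0, 2}, so
// ptrs[0] = tmp.data() and ptrs[1] = tmp.data() + 2 * num_dim. The scatter loop then
// places observations 1 and 2 in the first two observation slots of 'tmp' and
// observations 0 and 3 in the last two, i.e. batch-contiguous order {1, 2, 0, 3}.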

// Run the correction on the reordered data, then permute the output back to the original input order.
internal::compute(num_dim, sizes, ptrs, output, options);
internal::restore_input_order(num_dim, sizes, batch, output);
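
// A minimal usage sketch for this batch-assignment overload, again hypothetical and
// assuming an enclosing mnncorrect namespace; the parameter order is assumed to mirror
// the other overloads (dimension, observation count, input, batch labels, output, options).
template<typename Options_>
void example_batch_labels(std::size_t num_dim, int num_obs, const double* input, const int* batch, double* output, const Options_& options) {
    mnncorrect::compute(num_dim, num_obs, input, batch, output, options);
}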