A Python iteration of the approach chosen got annoyingly verbose when using partition by value and rank - open coded because I wouldn't know which implementation to take use.
I thought C++&STL provided "everything" I wanted to use with the introduction of ranges - it seems I ended up not using any.
I used an "AI" web service for an idea what equivalent "modern C++" code might look like, and tinkering with that and web "IDE"s.
Pretty much none of that code remains as I went from the circumstantial (see above) sort all the eligible values (and abuse sorting once more where order has potentially been messed up) to the partition as needed intended from the outset.
/** leetcode problem 3366: Minimum Array Sum
* given a list nums of non-negative integers
* and three non-negative integers k, op1, and op2
* return the minimum total sum attainable using
* (each at most once per index):
* - up to op1 halvings rounding up
* - up to op2 subtractions of k not resulting in a negative number
*//*
class Solution {
public:
int minArraySum(vector<int>& nums, int k, int op1, int op2) {
return minimum_array_sum(nums, k, op1, op2);
}
};
*/
#include <iostream>
#include <vector>
#include <algorithm>
#include <numeric>
static long int sum_halves(auto first, auto exclude) {
return std::transform_reduce(first, exclude,
0L, std::plus<long>(), [](auto v) {
return v / 2L;
});
}
// sum the top k halves from [first, exclude)
// side effect: permutes [first, exclude)
// XXX useful behaviour with too few values?
static long int sum_top_halves(auto first, auto exclude, int k) {
auto top = exclude - k;
if (top <= first) {
top = first;
} else {
std::nth_element(first, top, exclude);
}
return sum_halves(top, exclude);
}
/** return the minimum total sum attainable using (each at most once per index):
* - up to op1 halvings rounding up
* - up to op2 subtractions of k not resulting in a negative number
*/
static long
minimum_array_sum(const std::vector<int>& nums, int k, int op1, int op2) {
long total = std::reduce(nums.cbegin(), nums.cend(), 0L);
// no-brainers:
if (k <= 0) { // don't subtract values not reducing sum
op2 = 0; k = 0;
}
if (op1 <= 0 && op2 <= 0) // no operations with any effect
return total;
int min_reducible = (k == 1) ? 1 : (op1 > 0 ? 2 : k);
std::vector<int> partitioned;
std::ranges::copy_if(nums.cbegin(), nums.cend(),
std::back_inserter(partitioned),
[min_reducible](auto v){return min_reducible<=v;});
long n = partitioned.size();
const auto begin = partitioned.begin(), end = partitioned.end();
op1 = std::min((long)op1, n);
if (op2 <= 0) { // no subtractions, but halvings needs half a brain:
// just halve up to op1 greatest values
return total - sum_top_halves(begin, end, op1);
}
auto first_big = std::partition(begin, end, [k](auto v){return v<k;});
long big = first_big - begin;
op2 = std::min((long)op2, n - big);
total -= k * op2;
if (op1 <= 0)
return total;
auto untouched = big - op2;
if (op1 <= untouched)
return total - sum_top_halves(first_big, end, op1);
// more than untouched values huge enough to halve before subtraction?
auto huge = end - std::partition(first_big, end,
[k](auto v){return v < 2 * k - 1;});
long halvables = std::max(untouched, huge);
auto halve_first = std::min(halvables, (long) op1);
total -= sum_top_halves(first_big, end, halve_first);
op1 -= halve_first;
if (op1 <= 0)
return total;
for (int i = n - big, beyond = n - halve_first ; i < beyond ; i++)
partitioned[i] -= k;
return total - sum_top_halves(begin, end - halve_first, op1);
}
int main() {
auto t1 = std::vector<int>{2, 8, 3, 19, 3},
t2 = std::vector<int>{2, 4, 3};
std::cout << minimum_array_sum(t1, 3, 1, 1) << '\n'; // 23
std::cout << minimum_array_sum(t2, 3, 2, 1) << std::endl; // 3
return 0;
}
Would the benefits of more const correctness outweigh impaired(?) readability?
Would this approach benefit significantly from using ranges?
Should I convert the for
-loop to an algorithm? for_each
?
Should the first, exclude
parameters to "the sum_halves()
explicitly be references? (How?)