author: exhausted, developer: exhausted
Editorial
Tutorial is loading...
Solution
#include <bits/stdc++.h>
using i64 = long long;
// One test case of problem A: sort the array; the answer is how many
// elements from the lower-median position onward equal the median value.
void solve() {
    int n;
    std::cin >> n;
    std::vector<int> vals(n);
    for (auto &v : vals) {
        std::cin >> v;
    }
    std::sort(vals.begin(), vals.end());
    // 0-based index of the lower median.
    const int med = (n + 1) / 2 - 1;
    int answer = 0;
    for (int i = med; i < n; i++) {
        if (vals[i] == vals[med]) {
            answer++;
        }
    }
    std::cout << answer << "\n";
}
// Entry point: fast stream I/O setup, then run each test case.
signed main() {
    std::ios::sync_with_stdio(false);
    std::cin.tie(nullptr);
    int t = 1;
    std::cin >> t;
    for (; t > 0; t--) {
        solve();
    }
}
author: max0000561, developer: yunetive29
Editorial
Tutorial is loading...
Solution
#include <bits/stdc++.h>
using namespace std;
// Force 64-bit arithmetic everywhere: every `int` below is really long long.
#define int long long
// Modulus for the printed answer (1e9 + 7); `int` here is long long too.
const int P = 1e9 + 7;
// One test case of problem B. The optimal strategy is to always duplicate
// the (possibly empty) maximum-sum subarray, so after k operations the
// total is sum(a) + S * (2^k - 1) modulo P.
// NOTE: relies on the file-level `#define int long long`; every `int`
// in this function is actually 64-bit.
void solve() {
int n, k;
cin >> n >> k;
vector<int> a(n);
for (int i = 0; i < n; i++)
cin >> a[i];
// S: maximum subarray sum (Kadane; empty subarray allowed, so S >= 0).
// sum: total of the array, possibly negative before normalization.
int S = 0, sum = 0;
int cur = 0;
for (int i = 0; i < n; i++) {
sum += a[i];
cur += a[i];
cur = max(cur, 0LL);
S = max(S, cur);
}
// Bring both values into [0, P) before modular arithmetic.
sum = (sum % P + P) % P;
S = S % P;
// t = 2^k mod P.
int t = 1;
for (int i = 0; i < k; i++) {
t = t * 2 % P;
}
// answer = sum + S * (2^k - 1), kept non-negative before the final mod.
int ans = (sum + S * t - S + P) % P;
cout << ans << '\n';
}
// Entry point: fast I/O setup, then run all test cases.
// Fix: removed the unused local `G` and the dead, commented-out debug
// lines that referenced it.
signed main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int T = 1;
    cin >> T;
    while (T--)
        solve();
    return 0;
}
author: exhausted, developer: exhausted
Editorial
Tutorial is loading...
Solution
#include <bits/stdc++.h>
using i64 = long long;
// One test case of problem C: binary search on x, the minimum component
// size. feasible(x): greedily cut every subtree of size >= x (never above
// the root); x works if we made more than k cuts, or exactly k cuts with
// the leftover root component also of size >= x.
void solve() {
    int n, k;
    std::cin >> n >> k;
    std::vector<std::vector<int>> adj(n);
    for (int e = 0; e + 1 < n; e++) {
        int v, u;
        std::cin >> v >> u;
        v--;
        u--;
        adj[v].push_back(u);
        adj[u].push_back(v);
    }
    auto feasible = [&](int x) {
        int cuts = 0;
        auto dfs = [&](auto &&self, int v, int parent) -> int {
            int size = 1;
            for (int to : adj[v]) {
                if (to != parent) {
                    size += self(self, to, v);
                }
            }
            // Cut the edge above v whenever its piece is big enough
            // (the root has parent == itself and is never cut).
            if (size >= x && v != parent) {
                cuts++;
                return 0;
            }
            return size;
        };
        int rootSize = dfs(dfs, 0, 0);
        return cuts > k || (cuts == k && rootSize >= x);
    };
    int low = 1, high = n / (k + 1) + 1;
    while (high - low > 1) {
        int mid = low + (high - low) / 2;
        if (feasible(mid)) {
            low = mid;
        } else {
            high = mid;
        }
    }
    std::cout << low << "\n";
}
// Entry point: fast stream I/O, then run every test case.
signed main() {
    std::ios::sync_with_stdio(false);
    std::cin.tie(nullptr);
    int cases = 1;
    std::cin >> cases;
    while (cases > 0) {
        solve();
        cases--;
    }
}
author: azureglow, developer: azureglow
Editorial
Tutorial is loading...
Solution
#include <bits/stdc++.h>
using namespace std;
using ll = long long;
// Force 64-bit arithmetic: every `int` below is long long.
#define int ll
// Shorthand for passing a whole container to <algorithm> functions.
#define all(a) a.begin(), a.end()
// One test case of problem D: maximize the number of segments whose XORs
// OR together to at most x. Processes bits from high to low, merging
// elements between odd occurrences of the current bit.
void solve() {
    int n, x;
    cin >> n >> x;
    ++x; // now the OR must be strictly less than x
    vector<int> a(n);
    for (int &v : a)
        cin >> v;
    int best = -1;
    for (int bit = 30; bit >= 0; --bit) {
        vector<int> merged;
        bool parity = false;
        for (int j = 0; j < (int) a.size(); ++j) {
            if (parity)
                merged.back() ^= a[j];
            else
                merged.push_back(a[j]);
            if (a[j] & (1 << bit))
                parity = !parity;
        }
        if (x & (1 << bit)) {
            // The bit may remain set in the OR: candidate answer if the
            // total XOR has this bit cleared.
            if (!parity)
                best = max(best, (int) merged.size());
        } else {
            // The bit must be cleared in every segment XOR: impossible
            // if the whole array has it set.
            if (parity) {
                cout << best << '\n';
                return;
            }
            a = merged;
        }
    }
    cout << best << '\n';
}
// Entry point: fast stream I/O setup, then run each test case.
signed main() {
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);
    int tests = 1;
    cin >> tests;
    while (tests-- > 0) {
        solve();
    }
}
Solution (74TrAkToR)
#include<bits/stdc++.h>
using namespace std;
// Greedy for problem D (alternate solution): a[1..n] is scanned left to
// right, closing a segment as soon as its running XOR is a submask of x.
// Returns the number of segments, or -1 when the forced final segment
// cannot be made a submask of x.
int const maxn = 1e5 + 5;
int a[maxn];

int solve(int n, int x) {
    int pieces = 0;
    int acc = 0;
    for (int i = 1; i <= n; i++) {
        acc ^= a[i];
        if ((acc | x) == x) {
            // acc is a submask of x: close the segment here.
            acc = 0;
            pieces++;
        } else if (i == n) {
            // Leftover suffix can never fit under x.
            return -1;
        }
    }
    return pieces;
}
// Driver: for each test, try x itself plus every mask obtained by clearing
// one set bit of x and setting all lower bits (each such y < x, and any
// submask of y is a valid OR value), keeping the best segment count.
// Fix: `main()` had no return type, which is ill-formed in ISO C++ —
// changed to `int main()`.
int main() {
    ios_base::sync_with_stdio(0);
    cin.tie(0);
    cout.tie(0);
    int t;
    cin >> t;
    while (t--) {
        int n, x;
        cin >> n >> x;
        for (int i = 1; i <= n; i++) cin >> a[i];
        int ans = -1;
        ans = max(ans, solve(n, x));
        for (int b = 29; b >= 0; b--) {
            if ((x >> b) & 1) {
                // y: clear bit b of x, then set every bit below b.
                int y = (x ^ (1 << b));
                for (int c = b - 1; c >= 0; c--) {
                    if (((y >> c) & 1) == 0) y ^= (1 << c);
                }
                ans = max(ans, solve(n, y));
            }
        }
        cout << ans << '\n';
    }
    return 0;
}
author: yunetive29, developer: yunetive29
Editorial
Tutorial is loading...
Solution
#include <bits/stdc++.h>
using i64 = long long;

/// Binary exponentiation: returns a^b with O(log b) multiplications.
/// Works for any T supporting `*=` and construction from 1 (e.g. MInt).
template<class T>
constexpr T power(T a, i64 b) {
    T res = 1;
    while (b) {
        if (b % 2) {
            res *= a;
        }
        b /= 2;
        a *= a;
    }
    return res;
}
// Modular integer: value kept in [0, P). Supports +, -, *, / (division via
// Fermat inverse, so P must be prime), unary minus, stream I/O, and
// (in)equality. All arithmetic is constexpr-friendly.
template<int P>
struct MInt {
int x;
constexpr MInt() : x{} {}
// Reduce an arbitrary 64-bit value into [0, P).
constexpr MInt(i64 x) : x{norm(x % P)} {}
// Normalize a value already in (-P, 2P) into [0, P).
constexpr int norm(int x) const {
if (x < 0) {
x += P;
}
if (x >= P) {
x -= P;
}
return x;
}
// Canonical representative in [0, P).
constexpr int val() const {
return x;
}
explicit constexpr operator int() const {
return x;
}
// Additive inverse.
constexpr MInt operator-() const {
MInt res;
res.x = norm(P - x);
return res;
}
// Multiplicative inverse via Fermat's little theorem; requires x != 0
// and P prime.
constexpr MInt inv() const {
assert(x != 0);
return power(*this, P - 2);
}
// Widen to 64 bits before multiplying to avoid overflow.
constexpr MInt &operator*=(MInt rhs) {
x = 1LL * x * rhs.x % P;
return *this;
}
constexpr MInt &operator+=(MInt rhs) {
x = norm(x + rhs.x);
return *this;
}
constexpr MInt &operator-=(MInt rhs) {
x = norm(x - rhs.x);
return *this;
}
// Division = multiplication by the modular inverse.
constexpr MInt &operator/=(MInt rhs) {
return *this *= rhs.inv();
}
// Binary operators delegate to the compound-assignment forms above.
friend constexpr MInt operator*(MInt lhs, MInt rhs) {
MInt res = lhs;
res *= rhs;
return res;
}
friend constexpr MInt operator+(MInt lhs, MInt rhs) {
MInt res = lhs;
res += rhs;
return res;
}
friend constexpr MInt operator-(MInt lhs, MInt rhs) {
MInt res = lhs;
res -= rhs;
return res;
}
friend constexpr MInt operator/(MInt lhs, MInt rhs) {
MInt res = lhs;
res /= rhs;
return res;
}
// Reads a plain integer and reduces it mod P.
friend constexpr std::istream &operator>>(std::istream &is, MInt &a) {
i64 v;
is >> v;
a = MInt(v);
return is;
}
friend constexpr std::ostream &operator<<(std::ostream &os, const MInt &a) {
return os << a.val();
}
friend constexpr bool operator==(MInt lhs, MInt rhs) {
return lhs.val() == rhs.val();
}
friend constexpr bool operator!=(MInt lhs, MInt rhs) {
return lhs.val() != rhs.val();
}
};
// Prime modulus used throughout (1e9 + 7).
constexpr int MOD = 1e9 + 7;
// Z: modular-arithmetic integer type modulo MOD.
using Z = MInt<MOD>;
// Lazily grown tables of factorials, inverse factorials, and inverses
// (all mod MOD). Tables double in size on demand, amortizing the cost.
namespace comb {
int n = 0; // largest index currently tabulated
std::vector<Z> _fac = {1};
std::vector<Z> _invfac = {1};
std::vector<Z> _inv = {0};
// Extend every table up to index m (no-op if already large enough).
void init(int m) {
if (m <= n) return;
_fac.resize(m + 1);
_invfac.resize(m + 1);
_inv.resize(m + 1);
for (int i = n + 1; i <= m; i++) {
_fac[i] = _fac[i - 1] * i;
}
// Compute one modular inverse, then fill inverse factorials downward:
// invfac[i-1] = invfac[i] * i, and inv[i] = invfac[i] * fac[i-1].
_invfac[m] = _fac[m].inv();
for (int i = m; i > n; i--) {
_invfac[i - 1] = _invfac[i] * i;
_inv[i] = _invfac[i] * _fac[i - 1];
}
n = m;
}
// m!
Z fac(int m) {
if (m > n) init(2 * m);
return _fac[m];
}
// (m!)^{-1}
Z invfac(int m) {
if (m > n) init(2 * m);
return _invfac[m];
}
// m^{-1}
Z inv(int m) {
if (m > n) init(2 * m);
return _inv[m];
}
// Binomial coefficient C(m, k); 0 when k < 0 or k > m.
Z binom(int m, int k) {
if (m < k || k < 0) return 0;
return fac(m) * invfac(k) * invfac(m - k);
}
} // namespace comb
void solve() {
int n, m1, m2;
std::cin >> n >> m1 >> m2;
std::vector<int> p(m1), s(m2);
for (int i = 0; i < m1; i++) {
std::cin >> p[i];
}
for (int i = 0; i < m2; i++) {
std::cin >> s[i];
}
if (p[0] != 1 || s[0] != p[m1 - 1] || s[m2 - 1] != n) {
std::cout << "0\n";
return;
}
Z res = comb::binom(n - 1, s[0] - 1);
for (int i = m1 - 2; i > -1; i--) {
res *= comb::binom(p[i + 1] - 2, p[i + 1] - p[i] - 1) * comb::fac(p[i + 1] - p[i] - 1);
}
for (int i = 1; i < m2; i++) {
res *= comb::binom(n - s[i - 1] - 1, s[i] - s[i - 1] - 1) * comb::fac(s[i] - s[i - 1] - 1);
}
std::cout << res << "\n";
}
// Entry point: fast stream I/O, then run all test cases.
// Fix: removed the author's machine-specific freopen() redirection to
// /home/q1n/coding/{input,output}.txt guarded by #ifndef ONLINE_JUDGE —
// it breaks the program for anyone compiling locally (a failed freopen
// leaves the standard stream closed).
signed main() {
    std::ios::sync_with_stdio(false);
    std::cin.tie(nullptr);
    int t = 1;
    std::cin >> t;
    while (t--) {
        solve();
    }
}
author: exhausted, yunetive29, developer: exhausted, yunetive29
Editorial
Tutorial is loading...
Solution
#include <bits/stdc++.h>
using i64 = long long;
/// Fenwick (binary-indexed) tree over a type Info with +, -, and a
/// zero-like default constructor. Public indices are 0-based.
template<class Info>
struct Fenwick {
    std::vector<Info> t; // t[i-1] aggregates a 1-based internal range ending at i
    int n;

    Fenwick(int n = 0) : t(n), n(n) {}

    /// Point update: a[x] += v.
    void add(int x, const Info &v) {
        for (int i = x + 1; i <= n; i += i & -i) {
            t[i - 1] = t[i - 1] + v;
        }
    }

    /// Prefix sum a[0] + ... + a[x]; yields Info() for x < 0.
    Info sum(int x) {
        Info res = Info();
        for (int i = x + 1; i > 0; i -= i & -i) {
            res = res + t[i - 1];
        }
        return res;
    }

    /// Inclusive range sum a[l] + ... + a[r].
    Info rangeSum(int l, int r) {
        return sum(r) - sum(l - 1);
    }
};
// One test case of problem F, answered offline. Assumes a is a permutation
// of 1..n (pos is indexed by value) — TODO confirm against the statement.
// The array is reversed and each query mirrored, so every query becomes a
// sweep-friendly "right endpoint" event answered with a Fenwick tree.
void solve() {
int n, q;
std::cin >> n >> q;
std::vector<int> a(n), pos(n + 1);
for (int i = 0; i < n; i++) {
std::cin >> a[i];
}
// Work on the reversed array; queries are mirrored below to match.
std::reverse(a.begin(), a.end());
for (int i = 0; i < n; i++) {
pos[a[i]] = i;
}
constexpr int K = 19; // NOTE(review): appears unused — likely leftover
std::vector<i64> res(q);
// qry[r] holds (l, original query index) for queries ending at r.
std::vector<std::vector<std::pair<int, int>>> qry(n);
for (int i = 0; i < q; i++) {
int l, r;
std::cin >> l >> r;
l--, r--;
// Mirror the query into reversed-array coordinates.
std::swap(l, r);
l = n - l - 1;
r = n - r - 1;
qry[r].emplace_back(l, i);
}
// dp[v]: count of divisor chains involving the newest value x that
// currently end at value v; flushed into the tree and cleared each step.
std::vector<i64> dp(n + 1);
// f aggregates, per position, the number of chains attributed there.
Fenwick<i64> f(n);
for (int r = 0; r < n; r++) {
int x = a[r];
dp[x] = 1;
// Propagate counts along divisor chains x | y | z, restricted by the
// position order; ~n * log(n) * log(n) total over the sweep.
for (int y = x; y <= n; y += x) {
if (pos[y] > pos[x]) {
continue;
}
for (int z = 2 * y; z <= n; z += y) {
if (pos[z] > pos[y]) {
continue;
}
dp[z] += dp[y];
}
}
// Flush the new chain counts into the Fenwick tree at each multiple's
// position, then reset dp; ~n * log(n) * log(n) total.
for (int y = x; y <= n; y += x) {
f.add(pos[y], dp[y]);
dp[y] = 0;
}
// Answer every query whose right end is r; ~q * log(n) total.
for (auto [l, i] : qry[r]) {
res[i] += f.rangeSum(l, r);
}
}
for (int i = 0; i < q; i++) {
std::cout << res[i] << " \n"[i == q - 1];
}
}
// Entry point: fast stream I/O setup, then process all test cases.
signed main() {
    std::ios::sync_with_stdio(false);
    std::cin.tie(nullptr);
    int tc = 1;
    std::cin >> tc;
    while (tc-- > 0) {
        solve();
    }
}
For C, why is there an entire paragraph discussing why rerooting doesn't affect the answer, but no discussion on why the greedy algorithm is optimal?
BTW, I created video editorial for D: Birthday Gift.
With fixed x you want to maximize number of edges to remove. Consider lowest subtree with >= x vertices, if we dont cut edge right up this subtree, we can move closest edge from top and answer won't get worse
But when you disconnect a subtree from its parent, how do you make sure the parent's component still has at least x vertices?
We are trying to make as many cuts as possible only considering the current subtree, but we do nothing to ensure the parent still has x or more.
What am I missing?
Once there is a subtree with at least x vertices, you count it as a connected component and cut the edge between that subtree and its parent.
The connected component containing the root may end up with fewer than x vertices, but it was never counted in the first place. Just think of it as re-adding one previously cut edge to reattach this component to another one; this does not affect the count.
so essentially, for a fixed x, and number of cuts k, you want k + 1 components whose size >= x. If you have 3 components, what's left of the chopped up parent can just be added to the components we just made
same doubt and also should we not have to check for k+1 connected components? as if we remove k edges , no. of connected components will be k+1 if i'm not wrong...
Yeah, but I think the idea is that when you're running the algorithm, you're not guaranteeing that the connected component containing the root node has size of at least x. So, if you remove k edges, you get k+1 connected components, but for one of them you can just connect it back to the component with the root node, giving you k connected components. That way it's unnecessary to check whether the component with the root node has size at least x.
we will remove the subtree sizes which we have cut already so that way will not count the removed subtree and after that we just check wether the component of which has root have the size greater that >= x its okay otherwise we have to make 1 less cut.
you can check this,
Why is there a need to decrease cnt if the root node has a size less than m? cnt is incremented only if we have gotten a size greater than m. So we never had an extra number in cnt. I do not understand the need to decrease it
Consider this case.
n = 7, k = 3
1 2
2 3
1 4
4 5
1 6
6 7
To my understanding, the algorithm will cut edges (1, 2), (1, 4) and (1, 6). The cnt guarantees [0, cnt-1] cuts are possible, not exactly cnt cuts because it depends on the last group (root group) we made if its size >= particular x we are checking.
You can try adding extra edge like (1, 8) and see what happens.
root_comp_size < m
all other comp size >= m
so to make the root_comp also have size >= m , we can merge any adjacent other component to do that we need to remove 1 cut, which is what we did
I am just proving that it's optimal, like if we consider best way to choose edges, we can move edge lower, while subtree size is >= x. About algorithm of checking, of course we root a tree, then do this greedy, and we just need to check in the end that root component size is >= x, if not, then we can remove only 1 edge, cause all other comps are of size >= x
Just noting down my understanding if anyone is still confused about this:
There are really only two cases to think about
If the root component has at least x vertices
If the root component has less than x vertices
In the first case, it is straight forward. We simply have to check if we have at least
k + 1
components, since making k cuts would lead to k + 1
components. In the second case, it requires a bit more thought. As per most implementations, if we have less than x vertices, we have not added this component to our count of components yet. So we must increment this count by 1.
But since we have less than x vertices, we also want to re-unite this component with another (that already has at least x vertices, which is why it had gotten disconnected in the first place). So we must also decrement the component count by 1.
So ultimately, our component count remains the same, and we simply want to check if we have at least
k + 1
components, for the same reason that making k cuts would lead to k + 1
components. Ref [C++ clean]: https://codeforces.net/contest/1946/submission/252842752
During up-solving I found BFS thoughts more intuitive, if you consider from last level (leaf nodes) and then go upwards. I realised that the only way to break subtree optimally was the one that is mentioned in editorial. Though I couldn't implement BFS solution
https://codeforces.net/contest/1946/submission/252941516
Thank you very much!
C editorial is fucking trash
btw I dont know how C have 4k submissions in contest time, I waste ~4-5 hours for it
even after reading editorial lol
https://www.hackerrank.com/challenges/even-tree/problem
problem C mega sexy... during contest it just gave my skills a serious reality check
because he is exhausted ..... LMAO
Nice editorial . Really liked Problem C and D .
I see at least two things gone horribly wrong for this round:
A: Okay, this should probably seem like a nitpick for non-problemsetters, but you should have let $$$\mathcal{O}(n^2)$$$ pass for the position. Why exactly? Because this is a damn rule. Didn't KAN remind you?
F: Who even decided $$$\mathcal{O}(n \log^2 n)$$$ intended with $$$n=10^6$$$ was a good idea? Why? What led you to think so?
I don't see why my code is wrong for https://codeforces.net/contest/1946/submission/252819046 but keep getting "wrong answer on pretest 2 expected 12 but got 19 in 3096th number or so". Can you check my python submission in my history? It seems (exactly same idea) similar to a solution I found which was accepted.
The median will not always be the ceil[n/2]-1 position. It will be only for even number of elements. For odd number of elements , you don't need to do the -1
idk why you're asking me, but
ceil(n/2)
is a very bad idea. always use integer arithmetics for these use cases, like(n+1)//2
for ceil (replace//
with integer floor division in your language).It doesn't explain this though:
(accepted solution): https://codeforces.net/contest/1946/submission/252819115 (wrong solution but same as above): https://codeforces.net/contest/1946/submission/252819046
I looked at your solution and honestly I have no idea why it's wrong.
What's even stranger is that these two identical solutions (one is placed inside a function) works, but if not inside a function, it doesn't work? I don't know if .split(" ") makes it process data wrong?
(accepted solution): https://codeforces.net/contest/1946/submission/252819115 (wrong solution but same as above): https://codeforces.net/contest/1946/submission/252819046
oh well,
input().split()
is a list of strings. if you sort a list of strings it will be sorted in the lexicographical order, which is definitely undesired behaviour. Notice that $$$\texttt{"1000"}<\texttt{"2"}$$$ holds in lexicographical order.Nice! I missed that. Mystery solved.
$$$\mathcal{O}(n \log^2 n)$$$ isn't that bad for $$$n = 10^6$$$ especially when you aren't doing heavy stuff with sets or segment trees, and the TL is 6 seconds. Personally, my solution passed pretty close to time limit though, but as I said in a previous comment, they probably wanted to cut $$$n \log^3 n$$$ solution.
But $$$n=2 \times 10^5$$$ is more than enough to cut most $$$\mathcal{O}(n\log^3 n)$$$ solutions of any constant? I mean the single $$$\log n$$$ is already dozen times the difference, I can't see why would they fail to block a slower solution with $$$n=2 \times 10^5$$$. Unless they wanted to block an $$$\mathcal{O}(n\sqrt n)$$$ solution, which doesn't look like a very good decision after all if that solution doesn't make the task much easier to solve.
I see your point. They could have reduced both the limits and TL in order to make $$$n \log^2 n$$$ pass comfortably enough.
Oh, also another thing. I think the tight TL makes Python / Java solutions almost impossible to pass (evidenced by the fact that there are no AC Python or Java solutions yet, even in practice mode).
Two of the logarithms are from fenwick and harmonic series, so they are smaller than usual log
Why would that make a difference? The difference between $$$\log_2 x$$$ and $$$\ln x$$$ is constant, i.e. $$$1/\ln(2) \approx 1.44$$$ times, which suggests that it shouldn't be that much smaller than the usual log. Also, what makes you think Fenwick must have a smaller constant? Though it is hard to admit it, many sources suggest that the Fenwick tree's speed is on par with a well-written bottom-up segment tree. If their intention is to let Fenwick tree pass and block segment trees, wouldn't that be one of the most stupid decisions ever?
I wrote the same solution with a bottom up segment tree (253203142) and with a fenwick tree (253207869).
The solution with the fenwick tree got AC in ~5000 ms while the one with the segment tree got TLE 12.
In my opinion it's better to let $$$O(n\log^3n)$$$ solutions pass rather than cutting off some $$$O(n\log^2n)$$$ solutions because of the constant factor.
Thanks to this, I uphacked all solutions with
Arrays.sort((primitive type)[])
without randomization I found written in Java 8, not sure Java 21 can be hacked toowhat us hacking and how to hack?
Uphacking is available only for 1900+ rated users, see https://codeforces.net/blog/entry/68204
okay thanks
How is problem A $$$\mathcal{O}(n \log^2 n)$$$? It's clearly $$$\mathcal{O}(n \log n)$$$ per test case. 252755171
sir I wrote $$$\mathcal{O}(n \log^2 n)$$$ for F not A
I'm sorry, I didn't notice that "F" :(
Where are these rules mentioned? Like the one you've mentioned in the spoiler.
There is a document titled "Polygon advices", which starts with a huge text stating "IF YOU ARE TO PREPARE A ROUND ON CODEFORCES, THESE ARE RULES. FOLLOW THEM AS CLOSE AS YOU CAN". I do not know if I am allowed to disclose further; ask KAN for details.
https://codeforces.net/contest/1946/submission/252802394
Can anyone help find the test case for which my code is wrong? Also whats wrong in the approach
Greedily creating segments, left to right, in such a fashion might be wrong.
It would fail for the following test case
5 2
1 3 0 0 2
Expected: 4
Your code outputs 2. It tries to group [1],[3,0,0,2] instead of [1,3],[0],[0],[2]
Thank you.
Problem E is just liitle modification of past problem. Not new.
Could you give me the past problem
Can somebody please explain why the solution of the problem F works in $$$O(N\log^2(N))$$$?
There are $$$O(N\log^2(N))$$$ pairs of $$$(a,b,c)$$$ such that $$$a | b$$$ and $$$b |c$$$ and $$$ a < b < c $$$.
Can you please provide a proof for this statement?
What we need is $$$\sum \sum N/i/j$$$ for $$$1 \le i \le N$$$, $$$1 \le j \le N$$$
$$$\sum 1/i$$$ is $$$\log N$$$ for $$$1 \le i \le N$$$
$$$\sum 1/j$$$ is $$$\log N$$$ for $$$1 \le j \le N$$$
$$$\sum \sum N/i/j$$$ can be written as $$$N (\sum 1/i) * (\sum 1/j)$$$ for $$$1 \le i \le N$$$, $$$1 \le j \le N$$$
This leads to $$$O(N*log^2N)$$$ upper bound on no of such pairs.
Fix $$$a$$$. Now the number of pairs of $$$(b,c)$$$ that satisfy $$$a<b<c$$$ and $$$a|b$$$ and $$$b|c$$$ is bounded by
Now, the total number of $$$(a,b,c)$$$ pairs are around
Hi, could you explain more about the sum (1 / i) part? Is that the part of counting Cs when fixing a and b?
Why haven't you provided your code along with the editorial ? If I am not able to implement any solution provided above, should I go through all the submissions of users and try to find out which user has solved the problem according to approach described in the editorial ?
added
I can't understand the C solution — can you suggest a similar classic problem that would help me understand the idea?
this is the closest thing I could find, should help with understanding the checker for binary search
thanks bro
Hi, for problem C I can't understand why if we have at least k+1 vertices with at least x vertices in its subtrees our condition is resolved. Can anyone explain it briefly?
Because if we can get more than k+1 components, we can merge some of them to make the number of components exactly k+1; in doing so, the sizes of the merged components still remain >= x (sizes only increase), so the condition is still satisfied.
I think that the question told us that we should omit exactly k edges and after that, we should have at least x nodes in each remaining component but this solution is for situations where we want to only omit one edge and after that, we want to have at least x nodes in each remaining component. Am I right or does this solution support the first situation too?
the solution tells us to omit as many edges as possible ensuring that each component has size>=x
For problem C, If I do a recursive dfs, it gives TLE(Python). Can someone who solved in python tell me how to get around this? I would appreciate it a lot. Thank you
can't get around this
wish i read D first
D was much harder I think.
High quality editorial!
In problem E I think it should be
nCr[(pm1)-2,p(m1-1)-1]
since position of pm1 and (pm1)-1 both are fixed.Because we have
(pm1)-1
element and maximum element out of this is automatically atp(m1-1)th
position.thanks! fixed
Here is my stream discussin these problems
My contest screencast see it if you want to know what I was doing in the contest.
Sir can you explain In problem D why we will count no of submasks present in an array.Basically I didn't understand the getmaxlength() function
Initially, I misread the problem and tried to solve different version.
We dont need
getmaxlength()
function. The submission 252805024 that got AC did not even have this.Disclaimer: This comment assumes you have watched the stream and uses some of the definitions mentined in the stream.
The crucial idea is -
Lets say we want to find no of maximum partitions such that on
(pref[0]^pref[i])|(pref[i]^pref[j])|(pref[j]^pref[k])|(pref[k]^pref[n]) = M
Notice thatpref[0]=0
, sopref[i]
should be submask ofM
.Now lets look at
pref[i]^pref[j]
, we want it to be submask ofM
.So all the bits that are 0 in
M
, should be 0 inpref[i]^pref[j]
.If
k
th bit inpref[i]^pref[j]
is 0, ifk
th bit of bothpref[i]
andpref[j]
are same.Now, because all
pref[i]
is submask ofM
, so any bit that is0
inM
will also be 0 inpref[i]
.Because for any bit that is
0
inM
should be same for bothpref[i]
andpref[j]
, we have a condition that any bit that is0
inM
should also be0
inpref[j]
.Now, this is the definition of submask. We have reached a conclusion that
pref[j]
should be submask ofM
.Now, we can use the same reasoning to get
pref[k]
should be submask ofM
too. So on for allpref[*]
we need.Then the idea was we do not have to bruteforces
X
submasks, we can get it done usinglogX
different submasks.neat!
in problem a given solution yields output 1 for input of 2 member array (2,4). while the answer should be 2. if we use 1 operation value will be 7/2 which is ultimately 3.
heres the editorials solution:
void solve() { int n; std::cin >> n; std::vector a(n); for (int i = 0; i < n; i++) { std::cin >> a[i]; } std::sort(a.begin(), a.end()); int p = (n + 1) / 2 — 1; int res = std::count(a.begin() + p, a.end(), a[p]); std::cout << res << "\n"; }
am i missing something in the logic/ question statement?
edit--never mind read the problem wrong. used preconception of median to write the solution.
I didn't know E was a standard problem.
Solved it slightly differently. This felt a little bit more intuitive to me.
Key Idea: Create a directed tree and find the number of topological orderings possible for the tree.
Root of the directed tree would be $$$s_1$$$ or $$$p_{m_1}$$$ (basically the index where the maximum number occurs).
Now keep adding edges $$$a \rightarrow b$$$, which signifies number at index $$$b$$$ should be smaller than number at index $$$a$$$.
So, child of $$$p_{m_1}$$$ is $$$p_{m_1-1}$$$, child of $$$p_{m_1-1}$$$ is $$$p_{m_1-2}$$$, $$$.....$$$ , child of $$$p_{2}$$$ is $$$p_{1}$$$.
Similarly, child of $$$s_1$$$ is $$$s_2$$$, child of $$$s_2$$$ is $$$s_3$$$, and so on...
The answer required is number of topological orderings of this tree. You can imagine assigning descending numbers to a topological ordering to get one permutation.
A bit of extra handling is required to take into account for indices not included in $$$p$$$ or $$$s$$$, but that's easy as well. I'll leave it up to you, orelse you can see my submission.
actually one of our testers wrote this solution and it can be implemented easily without any handling any corner cases click
Can't open the page. Says — you are not allowed to view the requested page.
So, my original solution to this problem was this. The tutorial provided a different solution, because it seemed more understandable
FYI it is the same as
i would rather code the first than think about the most optimum way to code it sirs
252835387 Can someone please tell the test case where my code is failing. Thanks
Take a look at Ticket 17427 from CF Stress for a counter example.
Can someone explain the editorial of D? What does this mean "After this, if there is a 1 in x in this bit, update the answer and return the array to its original state."?
And is there any other approach to this problem?
According to me what they are trying to say is that if we have even 1 at ith POS and x ith bit is also 1 then we do not need to partition it further.
Which means we can return array before partition.
I think this is the most intuitive approach.
For F:
I wrote a $$$\mathcal O(\sum cnt^2_{divisors})$$$ and thought it was brute force but it passed. It can be really short and fast if implemented well.
Why it's the intended solution? $$$n=10^6$$$ usually leads to a $$$\mathcal O(n\log n)$$$ solution.
Anyway, a typical 74 math problem.
what is a typical "74 math problem"?
Both the idea and constraints gives me a strange feeling. 1732D2 - Balance (Hard version)
In problem F I think it should be t1 >= L instead of <=.
thanks! fixed it
I am wondering what is the limit of F's answer. Can someone prove that F's answer is in range of int or long long?
Does anyone have D's solution using DSU?
C u mean?
No. D has a tag of DSU. edit: In the morning it was showing tag of DSU but it's now removed. Why such?
I had such solution, you may check my submission
It's hard to grasp the underlying logic behind your DSU solution merely by looking at the code. Can you please elaborate your idea behind the solution?
When we go bit by bit, we might need to unite segments in one component, so I used dsu to store the number of components. I also used set to effectively unite neighbouring numbers and rollbacks, since I needed to roll after attempting each bit
252772529
Hey, can you explain your solution. Thanks
can you just read tutorial?
правильно, туда его, пусть идет решение читает
I was talking about the above code you mentioned in the comment. I did not see it was not yours
Reading proof for C was a nice journey
why everybody used lambda function in c instead of normal function?? (especially candidate master and above)?
With normal functions, you must make all variables and stuff global, so you must clear them after every test case, and this is so boring and waste of time.
and one more thing that we don't have pass many of the data that we pass in function
Please somebody explain first part of solution C, how we can get the same solution for x — 1, if we have it for x?
Suppose you want to know that is it possible to cut the tree using k edges in such a way that every component has at least x nodes. now suppose that the answer is affirmative,i.e., every component has at least x nodes. now if you rephrase the problem after replacing x with x-1, you see that if we cut the tree in the same way as we did to find the answer to whether is it possible to cut the tree so that every component has greater than or equal to x nodes, you find that every component has greater than x-1 nodes(as every component has >=x nodes).
I understand, that is, we need x — 1 vertices, but we already have x. I thought that we need exactly x — 1 :) As a result, we have answers [1, 1, 1, ..., 1, 0, 0 ...0] and we can use binary search to find the maximum possible x. Now it remains to write the checking function. Thanks, it's clear now!
welcome :)
https://codeforces.net/contest/1946/submission/252897123 Can anyone help find the test case for which my code is wrong?
That's problem D.
I see.
Nice to see helpful people in this community!
Problem B :Can somebody explained me why i am getting overflow issue:https://codeforces.net/contest/1946/submission/252902592
Never mind. Got it.
is there a way to use dsu in problem c? like if we remove all of the edges first and then start combining components?
Solution to problem C using Tree trimming submission
B is really just kadane
[submission:252783316]why is this code wrong,i just found indices of subarray with max sum and added it 2 power k times and finally added rest members of array other than those indices(indices of max sum subarray) to final sum
I don't understand a single thing about C editorial. Can somebody explain the greedy algorithm with actual proof?
This is my solution to problem C, the idea is same as the editorial still I'm getting TLE. Can anyone explain? https://codeforces.net/contest/1946/submission/252888566
can't understand the proof of problem C. whats the first case and the second case? and whats the first variant? whats the first time? Do "case", "variant" and "time" mean the same thing?
For such a binary tree and x=3: if I start the DFS at node 1, I think the algorithm would remove the edge 1-2. But if I start from node 2 and traverse node 1 last, the algorithm would not remove the edge 1-2. Although the answer is still correct, the sets of removed edges do not coincide. Could anyone please tell me what I might have misunderstood?
no, edge 1-2 will not be deleted, because you can renumerate node 2 and 3, in this case dfs remove only edge 1-3
Emm... Sorry im still confused. So u mean when dfs starts from node 1 and visits node 3 first, it would only remove edge 1-3? But the subtree of node 2 also meets the requirement(components > 3). why dfs do not cut the edge 1-2 since it would visit node 2 anyway.
because you return 1 edge if size of node 1 component is less than x
got it! thanks bro
I am just noticing the problem D is not having binary search tag, I thought Binary search is the most intuitive solution for D as I solved using binary search during contest. And also the editorial logic is not matching with the intuition i had ?
I did binary search on answer, like for k pairs what is the minimum number I can create satisfying <= x which was the difficult part.
Don't know I did binary search on C also, may be my thought process got biased ?
Hey there, i checked your solution, but could not understand the logic behind it. Can you please elaborate your solution?
Thanks!
Sure, so if you are asking how I approach the solution then its like I see that we have to calculate the maximum pairs k , so may be we can binary search on the answer k.
so to model the problem as binary search, I did like "lets say we have to take at least K pairs, in that case what is the minimum number we can make".
If you look at it, the modeling of the problem is also a bit non-trivial: I am saying we have to take at least K pairs (this "at least" is the most important part for writing the checker function correctly).
So in the checker function, what I am doing is trying to make the smallest number possible while needing to take at least K pairs.
Now, if you want to know how exactly I have written the code: my code is clean, so it should be self-explanatory if you are good with bitmasking and OR-minimization techniques (which I am not always so good at myself).
I can tell you the concept, I am going from MSB to LSB, and checking if the current bit I can make it 0 or not. If we can make it 0, we will update our answer for that particular bit as 0, otherwise as 1.
Hi! I have a question about problem B:
5 1
4 -2 8 -12 9
For the above test case, if I select [-12] as the subarray and insert its sum (-12) in the array, the sum of the array will be 4-2+8-12+9-12 = -5. And -5 modulo (10^9 + 7) is 1000000002. Why is this not the optimum answer?
Your motive is to calculate the maximum answer without modulo. We are asked to perform % (1e9 + 7) because answers can be pretty large and can't be represented in a 64-bit integer.
You are not supposed to take the value which gives you the maximum after the modulo.
Got it! Thank you so much ^_^
Problem C was very nice, it's very similar to this one (except for the binary search part):https://codeforces.net/contest/1833/problem/G
I think some problem setters really don't care about quality of Div2.B problems anymore. This Div2.B feels like filler problem, just invented at last moment or something. It's just kadane algorithm. Nothing more, nothing less.
How is the formula for A derived?
very good competition.I like it!!!
Felt problem C is not obvious -- just implemented and got AC but re-rooting argument in editorial is quite nice
I have a question: the x in problem B is, I think, constantly changing. The answer at k = m should depend on the x of the problem at k = m − 1, but then x should have changed during the transition, shouldn't it? Or am I overthinking it? :(
In E, as written in editorial :- p1 should be 1 for some valid permutation to exist. 1. Is it because, it's the first element and it will be maximum in it's segment because there's no element before it. 2. If the above reasoning is true, then shouldn't s1 also be equal to 1, since aN is the last element.
I think I've been missing something, this code gives wrong result for test case 6 1000, 6x -1000000000:
I can't really find the problem, I looked at the editorial code and it seems fine to me.
Also if anyone cares to elaborate, why can we use int for sum (like editorial does) can't that overflow the value, does OP knows that test cases won't to that, or is there something else that I'm missing?
Oh, so basically the problem was that sum was actually an int; I thought I had already tried it as long long. Also, I now see that in the editorial int was defined as long long — I don't know why someone would do that, it's confusing to beginners, but okay.
Hello, I just wanted to ask about this solution. Isn't (sum % P + P) % P = (sum % P)? So why write that extra +P?
really liked problem F, thanks for the contest!
In the editorial for C:
If after this process there are at least k connected components, then the condition is satisfied for this x , otherwise it is not.
Shouldn't it be k+1 connected components, since we are removing k edges?
you are right! fixed
Can anybody help me with B by providing a resource for me to understand modular arithmetic as required for such problems? For example, why must we take
t = t * 2 % P;
instead of simply adding first and then taking the modulo. Thanks in advance.
Hey, did you end up figuring it out. I was just doing this problem for practice and it seems I have the right algorithm but I don't get the modulo stuff either
Hi, unfortunately I was unable to understand that part.
exhausted
As per editorial of problem:D, I have one small doubt.
suppose my array is [ 2 , 1 , 2 , 2 , 1 , 2 ] and my x = 0;
Since we want to maximize the number of segments, each segment must contain exactly 2 such numbers.
Can you please explain, how to handle above case from the editorial ?
The answer is when I take the entire array; otherwise, there is no answer.
Can anyone explain the solution of $$$C$$$ to me in simple words, please? I mean, why does the greedy algorithm work?
In the solution of C :
There is no need to check whether it is the root node or not, because even if it is, it will make sz = 0, which will be handled in the
int t = dfs(dfs, 0, 0);
return (res > k || (t >= x && res == k) ? true : false);
so this can also work :
Can anyone explain what is wrong here https://codeforces.net/contest/1946/submission/254039662
Edit: I got it
In the editorial solution of problem E, the line res = max(res, (int)b.size()); updates res. But b.size() is non-increasing while we are moving from bit number 30 to bit number 0. If b.size() is non-increasing, why can't we simply print the first best answer of b.size() when discovered and then break out of the loop. I tried submitting it but I got WA, so I am missing something. Can anyone point out what am I missing?
Here's the submission link. The only difference between that submission and editorial's code is the break statement after res = max(res, (int)b.size())
Can someone send me a solution for problem B using dp? I do not know how to apply dp to it.
Well, what the editorial did was dp — it just has a formal name, "Kadane's Algorithm". My solution is here.
For C, this is code that i have written:
I am getting a runtime error on testcase 4 and I don't know why it is happening.
Why does this code TLE for F?
https://codeforces.net/contest/1946/submission/254907291
I'm at 77 wrong submissions, my solution looks almost identical to the editorial.