1579A - Casimir's String Solitaire
Idea: MikeMirzayanov
Solution
#include <bits/stdc++.h>
using namespace std;

int main() {
    int t;
    cin >> t;
    while (t--) {
        string s;
        cin >> s;
        // Each move erases one 'B' together with one 'A' or 'C', so the whole
        // string can be erased iff exactly half of its letters are 'B'.
        cout << (2 * count(s.begin(), s.end(), 'B') == (long long) s.size() ? "YES\n" : "NO\n");
    }
}
1579B - Shifting Sort
Idea: doreshnikov
Solution
#include <bits/stdc++.h>
using namespace std;

typedef pair<int, int> pii;

int main() {
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        vector<int> a(n);
        vector<pii> actions;
        for (int i = 0; i < n; i++)
            cin >> a[i];
        // Selection sort: bring the minimum of a[i..n-1] to position i
        // with one cyclic shift of the segment [i, min_pos].
        for (int i = 0; i < n - 1; i++) {
            int min_pos = i;
            for (int j = i + 1; j < n; j++)
                if (a[j] < a[min_pos])
                    min_pos = j;
            if (min_pos > i) {
                actions.push_back({i, min_pos});
                int opt = a[min_pos];
                for (int j = min_pos; j > i; j--)
                    a[j] = a[j - 1];
                a[i] = opt;
            }
        }
        cout << actions.size() << '\n';
        for (auto &lr : actions) {
            // Borders are 1-indexed; the shift amount (r - l) moves the
            // minimum to the front of the segment.
            cout << lr.first + 1 << ' ' << lr.second + 1 << ' ' << lr.second - lr.first << '\n';
        }
    }
}
1579C - Ticks
Idea: MikeMirzayanov
Solution
#include <bits/stdc++.h>
using namespace std;

int main() {
    int t;
    cin >> t;
    while (t--) {
        int n, m, k;
        cin >> n >> m >> k;
        // status: 0 = empty cell, 1 = '*' not yet covered, 2 = '*' covered by a tick
        vector<vector<int>> status(n, vector<int>(m, 0));
        for (int i = 0; i < n; i++) {
            string s;
            cin >> s;
            for (int j = 0; j < m; j++)
                if (s[j] == '*')
                    status[i][j] = 1;
        }
        for (int i = n - 1; i >= 0; i--) {
            for (int j = 0; j < m; j++) {
                if (status[i][j] == 0)
                    continue;
                // Try (i, j) as the bottom of a tick: grow both diagonals
                // while they stay inside the grid and consist of '*'.
                int len = 0;
                while (j > len && j + len + 1 < m && i > len) {
                    if (status[i - len - 1][j - len - 1] == 0 || status[i - len - 1][j + len + 1] == 0)
                        break;
                    len++;
                }
                // Only ticks of size at least k are allowed; mark their cells as covered.
                if (len >= k) {
                    for (int d = 0; d <= len; d++) {
                        status[i - d][j - d] = 2;
                        status[i - d][j + d] = 2;
                    }
                }
            }
        }
        // The answer is YES iff every '*' belongs to some drawn tick.
        bool ok = true;
        for (int i = 0; i < n; i++)
            for (int j = 0; j < m; j++)
                if (status[i][j] == 1)
                    ok = false;
        cout << (ok ? "YES" : "NO") << '\n';
    }
}
1579D - Productive Meeting
Idea: doreshnikov
Solution
#include <bits/stdc++.h>
using namespace std;

typedef pair<int, int> pii;

int main() {
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        // Keep (remaining sociability, index) pairs sorted in decreasing order.
        auto cmp = [](pii const &x, pii const &y) {
            return x > y;
        };
        set<pii, decltype(cmp)> a(cmp);
        vector<pii> answer;
        for (int i = 0; i < n; i++) {
            int ai;
            cin >> ai;
            if (ai > 0)
                a.emplace(ai, i + 1);
        }
        // Greedy: while at least two people can still talk, pair the two
        // with the largest remaining sociability.
        while (a.size() > 1) {
            auto p1 = *a.begin();
            a.erase(a.begin());
            auto p2 = *a.begin();
            a.erase(a.begin());
            answer.emplace_back(p1.second, p2.second);
            if (p1.first > 1) a.emplace(p1.first - 1, p1.second);
            if (p2.first > 1) a.emplace(p2.first - 1, p2.second);
        }
        cout << answer.size() << '\n';
        for (auto &p : answer) {
            cout << p.first << ' ' << p.second << '\n';
        }
    }
}
1579E1 - Permutation Minimization by Deque
Idea: MikeMirzayanov
Solution
#include <bits/stdc++.h>
using namespace std;

int main() {
    int t;
    cin >> t;
    while (t--) {
        int n, a;
        cin >> n;
        deque<int> d;
        for (int i = 0; i < n; i++) {
            cin >> a;
            // Putting a smaller element in front always makes the sequence
            // lexicographically smaller; otherwise append to the back.
            if (d.empty() || a < d[0])
                d.push_front(a);
            else
                d.push_back(a);
        }
        for (int x : d)
            cout << x << ' ';
        cout << '\n';
    }
}
1579E2 - Array Optimization by Deque
Idea: doreshnikov
Solution
#include <bits/stdc++.h>
#include <ext/pb_ds/assoc_container.hpp>
using namespace std;
using namespace __gnu_pbds;

typedef pair<int, int> node;
typedef tree<node, null_type, less<node>,
        rb_tree_tag, tree_order_statistics_node_update> ordered_set;

int main() {
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        ordered_set s;  // (value, index) pairs of the elements seen so far
        long long cnt = 0;
        for (int i = 0; i < n; i++) {
            int a;
            cin >> a;
            // Pushing to the front creates an inversion with every strictly
            // smaller previous element, pushing to the back with every
            // strictly greater one; equal elements cost nothing either way.
            int cntLess = s.order_of_key(node(a, 0)),
                cntGreater = i - s.order_of_key(node(a, n));
            cnt += min(cntLess, cntGreater);
            s.insert(node(a, i));
        }
        cout << cnt << '\n';
    }
}
1579F - Array Stabilization (AND version)
Idea: doreshnikov
Solution
#include <bits/stdc++.h>
using namespace std;

int main() {
    int t;
    cin >> t;
    while (t--) {
        int n, d;
        cin >> n >> d;
        vector<int> a(n);
        for (int i = 0; i < n; i++)
            cin >> a[i];
        // The positions split into cycles under i -> (i + d) mod n.
        // An element turns to 0 after as many steps as the length of the run
        // of 1s ending at it along its cycle, so the answer is the longest
        // circular run of 1s over all cycles, or -1 if some cycle is all 1s.
        vector<bool> used(n, false);
        bool fail = false;
        int res = 0;
        for (int i = 0; i < n; i++) {
            if (used[i])
                continue;
            int cur = i, pref = 0, last = 0, iter = 0, ans = 0;
            do {
                used[cur] = true;
                if (a[cur] == 0) {
                    ans = max(ans, last);
                    last = 0;
                } else {
                    last++;
                    if (iter == pref) {
                        pref++;  // still inside the run of 1s at the cycle start
                    }
                }
                cur = (cur + d) % n;
                iter++;
            } while (cur != i);
            if (iter != pref)
                ans = max(ans, pref + last);  // run wrapping around the cycle start
            else {
                fail = true;  // the whole cycle consists of 1s: never stabilizes
                break;
            }
            res = max(res, ans);
        }
        if (fail)
            cout << "-1\n";
        else
            cout << res << '\n';
    }
}
1579G - Minimal Coverage
Idea: doreshnikov, MikeMirzayanov
Solution
#include <bits/stdc++.h>
using namespace std;

const int INF = 1000000000;

int main() {
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        vector<int> a(n);
        int maxl = 0;
        for (int i = 0; i < n; i++) {
            cin >> a[i];
            maxl = max(maxl, a[i]);
        }
        // dp[i][left] = the minimal possible distance from the current point
        // to the right border of the covered segment, given that the first i
        // segments are placed and the distance to the left border is `left`.
        // The answer never exceeds 2 * maxl, so `left` is bounded by that.
        vector<vector<int>> dp(n + 1, vector<int>(2 * maxl + 1, INF));
        dp[0][0] = 0;
        for (int i = 0; i < n; i++) {
            for (int left = 0; left <= 2 * maxl; left++) {
                if (dp[i][left] == INF)
                    continue;
                // Go left: the left border extends if we would cross it.
                dp[i + 1][max(left - a[i], 0)] = min(dp[i + 1][max(left - a[i], 0)], dp[i][left] + a[i]);
                // Go right: symmetric; states with left > 2 * maxl can't be optimal.
                if (left + a[i] < (int) dp[i + 1].size()) {
                    dp[i + 1][left + a[i]] = min(dp[i + 1][left + a[i]], max(dp[i][left] - a[i], 0));
                }
            }
        }
        // Total covered length = distance to the left border + distance to the right.
        int ans = 2 * maxl + 1;
        for (int left = 0; left <= 2 * maxl; left++)
            ans = min(ans, left + dp[n][left]);
        cout << ans << '\n';
    }
}
Please fix the code for problem G.
Similar stuff in video format: video solutions
Really sorry the editorial took longer than usual. I'll try to post it sooner next time!
Still, this is one of the highest-quality editorials I've seen, great job!
Problem B solution: actions.push_back({l, min_pos}); "l" is not defined here.
Yep, sorry, fixed
Regarding E2: I have faced this concept in many problems ("finding the number of elements smaller or larger than the current element occurring before it"). Can someone please point me to a source to learn __gnu_pbds::tree, balanced binary search trees, or whatever the best way to solve this is?
This blog by Adamant perfectly describes how to use ordered_set: https://codeforces.net/blog/entry/11080
In problem A, how can it handle the "BABA" case? Its answer should be NO, but according to the above solution it gives YES.
The string is of length 4 and there are 2 B's, which is half the size, so it would print YES.
But I guess it clearly mentioned that we can't pick two adjacent characters.
They don't have to be adjacent, but they can be.
There is a nice way to implement G with a bitset. Let's do a binary search on the answer. The DP for checking whether we can stay inside a segment of fixed length $$$m$$$ looks like this:
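A minimal sketch of that check (my own code, not the commenter's; it assumes the bound $$$l_{max} \le 1000$$$ on the segment lengths from the constraints, so the answer never exceeds $$$2 \cdot l_{max}$$$):
#include <bits/stdc++.h>
using namespace std;

const int L_MAX = 1000;  // assumed bound on the segment lengths a[i]

bool can(const vector<int> &a, int m) {
    bitset<2 * L_MAX + 1> cut, d;
    for (int j = 0; j <= m; j++)  // positions 0..m inside a segment of length m
        cut.set(j);
    d = cut;                      // the starting point may be anywhere inside
    for (int x : a)
        d = ((d >> x) | (d << x)) & cut;  // move left or right by x, stay inside
    return d.any();               // some position never left the segment
}
The answer is then the smallest $$$m$$$ for which can(a, m) holds, found by binary search over $$$[0, 2 \cdot l_{max}]$$$.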
Basically, bit $$$i$$$ in $$$d$$$ being equal to 1 after processing some numbers from $$$a$$$ means that we could be at position $$$l+i$$$ of some segment $$$[l, r]$$$ without going out of its borders up to this point. If at least one bit is 1 after this cycle, then the answer is $$$\le m$$$, otherwise it's $$$> m$$$. The complexity is $$$O(n \cdot \frac{l_{max}}{64} \cdot \log(l_{max}))$$$, which is pretty much the same.
I solved it like this too. Very elegant.
Can you please explain how this line
for(auto x : a) d = ((d>>x)|(d<<x))&cut;
works? Sorry to bother you, but my potato brain is unable to understand and visualise it.
After i loops, the j'th bit of b represents whether it is possible to reach position j after considering i segments; in other words, the j'th bit represents whether it is possible for the "end" of the i'th segment to be at position j. Now, say we are in the i'th loop. The j'th bit of b currently represents whether it is possible to reach position j after considering i-1 segments. So, if we consider the i'th segment now, position j is reachable if and only if either position j-x or position j+x was reachable after i-1 segments. This transition is equivalent to b_new[j] = b_old[j-x] | b_old[j+x], which is equivalent to
b = (b << x) | (b >> x);
But the issue is that bitsets must have a constant size at compile time. So we set the size to the maximum size we might require, and after each loop we set b[j] = 0 for all j > m, which is a way of saying that it is not possible for any segment to end at j > m. A simple way to do this for any bitset b is
b &= cut;
where cut has the j'th bit equal to 1 for j <= m, and 0 for j > m.
After n iterations, the j'th bit of b represents whether it is possible to reach position j when starting from any position p with 0 <= p <= m. If this is true for any j with 0 <= j <= m, we return true; otherwise we return false.
Hope you understood that. Sorry I'm not good with markdown.
Got it, thanks!
I had the same idea but messed something up
Why do you use &cut?
You actually need a bitset of size $$$m+1$$$, but you can't do that at runtime. It's basically emulating that.
https://codeforces.net/contest/1579/submission/130289614 Please tell me why this solution for D gets WA, if you have time. PS: ignore it, I figured out why I'm getting it wrong.
I wonder why this solution, https://codeforces.net/contest/1579/submission/130179380, got WA, but the same logic with just a few changes got AC: https://codeforces.net/contest/1579/submission/130194128
And the first solution above is more optimized! Can someone give a test case where it fails?
Try this: 1 4 4 4 3 1
I reflected a LOT on problem G and found the DP solution, but I didn't come up with the idea that the answer never exceeds $$$2 \cdot l_{max}$$$ :( Beautiful problem overall.
Can you tell me why my solution gets a wrong answer even though I did it exactly how the editorial says? https://codeforces.net/contest/1579/submission/130186425
You are inserting into the beginning of a vector, which takes O(n), because it shifts everything by one step. A better solution is to use a data structure that allows adding elements in O(1): a deque, or a list (which is based on a linked list). Another option is a vector with two pointers starting in the middle (shifting them as you add elements to the left or to the right). That way you don't need to shift all elements each time you put something at the beginning. Your solution fails on tests like (200000, 199999, 199998, ...), because the time complexity becomes O(n^2), which is too slow for this task.
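A sketch of that two-pointer idea (buf and the pointer names are mine, for illustration):
#include <bits/stdc++.h>
using namespace std;

int main() {
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        vector<int> buf(2 * n);  // room to grow n steps in either direction
        int lo = n, hi = n;      // current elements occupy buf[lo, hi)
        for (int i = 0; i < n; i++) {
            int a;
            cin >> a;
            if (lo == hi || a < buf[lo])
                buf[--lo] = a;   // O(1) "push_front"
            else
                buf[hi++] = a;   // O(1) "push_back"
        }
        for (int i = lo; i < hi; i++)
            cout << buf[i] << ' ';
        cout << '\n';
    }
}
Every insertion is O(1), so the whole thing is O(n), just like the deque version in the editorial.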
Alternative solution for 1579D - Productive Meeting. Think of the following question: which arrays $$$a$$$ can you turn into all zeros? Suppose the sum is $$$\sum\limits_{i=1}^n a_i = S$$$ (same as in the editorial). If $$$S$$$ is odd, obviously we can't. Also, by the simplest pigeonhole principle, if there is $$$a_i > S/2$$$ then we can't reduce all $$$a_i$$$ to zero. But if there is $$$a_i > S/2$$$, it's easy to see that it's the maximum of the array, and it's unique.
So the idea is to decrease the corresponding $$$a_i$$$ so that it becomes possible to turn everything into zero. To do that, calculate $$$S$$$. Find $$$a_i > S/2$$$ if it exists, and decrease it by one while $$$a_i > S/2$$$ still holds (decreasing $$$S$$$ correspondingly). Because this maximum was unique, there is no need to search for anything else. Now $$$S$$$ can be odd, but we'll handle that later. Next, make a list of all participants with repetition: in the case $$$a$$$ = [1,2,3] we get $$$p$$$ = [1,2,2,3,3,3]. Its length is $$$S$$$; if it's odd, remove the last element. Now, I claim the answer is the set of pairs $$$(p_i, p_{i + len(p)/2})$$$. The only thing left to prove is that $$$p_i \neq p_{i + len(p)/2}$$$, and this follows from $$$a_i \leq S/2$$$. Time complexity is $$$O(S)$$$. 130295963
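A sketch of this construction (my own illustration of the comment's idea, not the linked submission; it assumes $$$S$$$ is small enough for the $$$O(S)$$$ list, as the stated complexity already implies):
#include <bits/stdc++.h>
using namespace std;

int main() {
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        vector<int> a(n);
        long long S = 0;
        int mi = 0;
        for (int i = 0; i < n; i++) {
            cin >> a[i];
            S += a[i];
            if (a[i] > a[mi]) mi = i;
        }
        while (2LL * a[mi] > S) {  // cap the unique maximum at S/2
            a[mi]--;
            S--;
        }
        vector<int> p;  // participants listed with repetition, O(S) memory
        for (int i = 0; i < n; i++)
            for (int j = 0; j < a[i]; j++)
                p.push_back(i + 1);
        if (p.size() % 2)
            p.pop_back();  // odd total: one unit of sociability goes unused
        int half = p.size() / 2;
        cout << half << '\n';
        for (int i = 0; i < half; i++)
            // p[i] != p[i + half] because every a_i is at most S/2
            cout << p[i] << ' ' << p[i + half] << '\n';
    }
}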
While up-solving, I tried to solve problem E2 and have been getting TLE on test case 3.
For each element, I counted the number of smaller and greater elements before it. The final result is the sum, over all elements, of the minimum of the number of smaller and the number of greater elements before it. To count the smaller and greater elements before each element of the array, I used method 3 given on this website: Code Used. Its time complexity is O(n log n).
Any help regarding where I went wrong would be greatly appreciated. My submission.
The BST code you've used does not maintain any balancing, and as a result it might work in O(n) per insert. Try inserting consecutive integers and you'll notice the height of the tree is linear in its size.
There are efficient BST variants, like Red-Black Trees, AVL Trees or Splay Trees, which are a bit more complicated but maintain O(log(n)) time per query (amortized or not).
For this problem though, you could use ordered_set from pbds. Refer to this blog by Adamant for more info https://codeforces.net/blog/entry/11080.
Thank You!! The explanation helped a lot. And yeah, I tried using ordered_set from pbds and my solution got accepted.
Can you help me? Why is this giving TLE: E2 Code
while this one is accepted: E2 Code
Thanks in advance. The map is used just to store frequencies.
Perhaps there is an anti-hash test there. Because of it, unordered_map can run in $$$O(n)$$$ per operation.
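A common workaround, if one still wants unordered_map, is a randomized hash such as the well-known splitmix64 pattern (a sketch; freq is a hypothetical name, not from the submissions above):
#include <bits/stdc++.h>
using namespace std;

// Randomized hash: crafted anti-hash tests can no longer pile everything
// into one bucket, because the seed differs from run to run.
struct custom_hash {
    size_t operator()(uint64_t x) const {
        static const uint64_t SEED =
            chrono::steady_clock::now().time_since_epoch().count();
        x += SEED + 0x9e3779b97f4a7c15ULL;
        x = (x ^ (x >> 30)) * 0xbf58476d1ce4e5b9ULL;
        x = (x ^ (x >> 27)) * 0x94d049bb133111ebULL;
        return x ^ (x >> 31);
    }
};

unordered_map<long long, int, custom_hash> freq;  // O(1) expected per operation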
There is no need for the map if you use an ordered_set; using both structures may yield high running times, resulting in your TLE.
I was going to answer a comment with a different proof of D (always take the two largest elements) that I came up with, but it got so long that I figured it would look better here with spoilers and math mode. This is mostly a result of bashing out a ton of different cases that could happen. Feel free to point out any mistakes (which can include forgetting to format somewhere).
There are two cases (as the editorial mentions):
Case 1: the largest element is greater than the sum of the others. In this case, it's optimal to use the largest element in every operation (an upper bound on the answer is the sum of the non-largest elements, and doing an operation without the largest gives us 1 operation but decreases the upper bound by 2), and taking the two largest every time accomplishes this.
Case 2: the largest element is at most the sum of the others. Let's do our operations in such a way that this invariant always holds: the largest minus the smallest element is at most 1. Why? Because the final state will be a bunch of 0's, and maybe a single 1 if the whole sum is odd.
If the largest minus the smallest is at most 1 and we have a 0, then the largest element must be at most 1. If we only have 0's and 1's, then we can pair the 1's until we can't anymore, and we either get all 0's (even sum) or all 0's and one 1 (odd sum). In both cases this is optimal: there's no way you could have done more than $$$\lfloor \frac{sum}{2} \rfloor$$$ operations, and reaching that end state means you did exactly that many (each operation subtracts 2, so the number of operations is $$$\frac{initial \, sum - final \, sum}{2}$$$; you can work out the cases if you want).
So if we can make sure that the largest minus the smallest element is at most 1, we're guaranteed to reach this final optimal state.
If $$$(largest - smallest) = 1$$$, then we wouldn't decrease smallest without also decreasing largest, since we prefer to take larger elements [do casework on whether you have 1, 2, or more than 2 duplicates of the largest element]. If $$$(largest - smallest) = 0$$$, it doesn't matter what we do in this operation, $$$(largest - smallest)$$$ can become 1 at worst. So if this condition is true, then it will stay true, and our final state will be optimal.
So the last part is: if this condition isn't initially true, we need to make it true. Say we have $$$x$$$ copies of the maximum element.
If $$$x \geq 2$$$, then we'll decrease pairs of these maximum elements until we have $$$x \bmod 2$$$ copies remaining. Note that because we assume $$$(largest - smallest) \geq 2$$$ [the condition isn't initially met], it's always valid to decrease these maximum elements, because they must be at least 2. If $$$x$$$ was even, then the largest element decreases by 1 and we're closer to our goal of $$$(largest - smallest) \leq 1$$$. If $$$x$$$ was odd, we get...
$$$x = 1$$$. With our algorithm of selecting the two largest, every operation will decrease the largest by 1, because we only have 1 copy of the largest. Sometimes we'll decrease the smallest, sometimes we won't, but no operation can increase $$$(largest - smallest)$$$.
At the beginning, we asserted that the largest element $$$\leq$$$ sum of all other elements.
We'll always take one away from the largest element, so the largest element will decrease every time the sum of the rest decreases. If something else becomes the largest element since it wasn't part of the operation, and it's at least $$$2$$$, then you have at least two elements that are at least $$$(2 - 1)$$$ that were part of the operation. The only time where the sum of all other elements can become smaller is in the final state, if the sum of all elements is odd, but that state is optimal anyway.
Why am I bringing this up? Because if we assume that to be true (which it is), then in the $$$x = 1$$$ case, there's no way that the single maximum element can remain the unique maximum forever. As long as $$$(largest - smallest) \geq 2$$$, then the largest element $$$\geq 2$$$, so the remaining sum $$$\geq 2$$$, and we have at least 2 elements, so we can continue doing operations. These operations will continue until either that element is no longer the unique maximum (so $$$x > 1$$$), $$$(largest - smallest)$$$ becomes $$$\leq 1$$$ naturally, or that element becomes less than 2, in which case $$$(largest - smallest) \leq 1$$$ anyway. In all cases, we will end up closer to our goal of $$$(largest - smallest) \leq 1$$$.
The fear may be that we can run out of operations while $$$(largest - smallest) \geq 2$$$. But in the casework on x above, it's shown that if $$$(largest - smallest) \geq 2$$$, we will always still have operations that we can do. So while $$$(largest - smallest) \geq 2$$$, we will have operations to do, and these operations can never increase $$$(largest - smallest)$$$. We can't do infinite operations, so eventually, $$$(largest - smallest)$$$ has to decrease to $$$\leq 1$$$, and the algorithm will succeed.
Yeah, this is how I implemented a previous problem that asked the same thing. Solution here: 111076508
In problem G, can someone explain why we take 0 as the leftmost boundary and never cross it? I've been stuck on this idea for two days.
In problem G, we maintain the relative position of the start with respect to the left and right boundaries, not the actual position of the start. So whenever we would cross the leftmost boundary of our segment, we treat that leftmost point (< 0) as the new leftmost boundary and the new start.
That's why we always keep 0 as the leftmost boundary and never cross it. You can watch galen_colin's stream to get a better understanding. Link: https://www.youtube.com/watch?v=JRuAgCCwi0M
Thank you so much. I finally get it.
Hey guys, in problem E2 I used discretization and a Binary Indexed Tree to count numbers, and it leads to TLE, but the solution's complexity should be $$$O(n \log n)$$$. I don't know why it cannot get AC. This is my code link.
You memcpy the whole array while you need only n elements. Use vectors to avoid such mistakes.
I used map instead of unordered_map and it got AC: 193683141
Any similar problems like problem G? I really liked the idea.
Hey guys, I've got TLE in E2, but I used a multiset with lower_bound/upper_bound, which work in $$$O(\log N)$$$. So I guess my submission should run in $$$O(N \log N)$$$ time. Here is my submission, 130358299; can you tell me where I'm wrong?
https://codeforces.net/contest/1579/submission/130371833
We are in the same situation I guess
If it's not a bother, can someone please tell me why this submission, https://codeforces.net/contest/1579/submission/130371833, gets TLE? I thought its complexity was O(n log n), which should be fast enough.
I believe here the distance function takes O(n) time. It is much better to use a structure like __gnu_pbds::tree. You can read about it here.
You can read more about the time complexity of the distance function here.
Oh, yeah, huge thanks, I always assumed it was O(1).
For problem D, will choosing the min and max elements at each turn work similarly?
It worked for me. My submission
Problem F can also be solved using binary lifting, although it is not optimal.
Time Complexity: $$$O(n\log{n})$$$
130189104
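A sketch of how such an approach might look (my own illustration, not the linked submission): after $$$s$$$ steps, each element is the AND of $$$s+1$$$ consecutive values along the backward chain $$$i \to i - d$$$, so we can precompute AND over power-of-two windows on that chain (AND is idempotent, so two overlapping windows answer any length) and binary search the number of steps.
#include <bits/stdc++.h>
using namespace std;

int main() {
    int t;
    cin >> t;
    while (t--) {
        int n, d;
        cin >> n >> d;
        vector<int> a(n);
        for (auto &x : a) cin >> x;
        int LOG = 1;
        while ((1 << LOG) < n + 1) LOG++;
        auto jump = [&](int i, long long s) {
            return int(((i - s * d) % n + n) % n);  // s backward steps of size d
        };
        // up[k][i] = AND of a over i, i-d, ..., i-(2^k - 1)d (indices mod n)
        vector<vector<int>> up(LOG + 1, vector<int>(n));
        up[0] = a;
        for (int k = 1; k <= LOG; k++)
            for (int i = 0; i < n; i++)
                up[k][i] = up[k - 1][i] & up[k - 1][jump(i, 1LL << (k - 1))];
        // ok(steps): after `steps` operations, is every element zero?
        auto ok = [&](int steps) {
            long long len = steps + 1;
            int k = 63 - __builtin_clzll(len);
            for (int i = 0; i < n; i++)
                if (up[k][i] & up[k][jump(i, len - (1LL << k))])
                    return false;
            return true;
        };
        if (!ok(n)) {            // a window covering a whole cycle is still 1
            cout << -1 << '\n';
            continue;
        }
        int lo = 0, hi = n;      // ok is monotone in the number of steps
        while (lo < hi) {
            int mid = (lo + hi) / 2;
            if (ok(mid)) hi = mid; else lo = mid + 1;
        }
        cout << lo << '\n';
    }
}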
About problem E2: I saw someone write the solution like this:
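The add() and query() pair in solutions like that is typically a Fenwick tree (binary indexed tree). A minimal sketch of that kind of code, my own illustration rather than the quoted solution, assuming values are compressed to 1..n:
#include <bits/stdc++.h>
using namespace std;

const int N = 200005;  // assumed bound on n
int bit[N];            // Fenwick tree over (compressed) values

void add(int i, int v) {        // point update: count of value i changes by v
    for (; i < N; i += i & -i)
        bit[i] += v;
}

int query(int i) {              // prefix sum: how many stored values are <= i
    int s = 0;
    for (; i > 0; i -= i & -i)
        s += bit[i];
    return s;
}

// For E2: the number of previous elements smaller than a is query(a - 1),
// the number greater is i - query(a), and each element is added via add(a, 1).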
I guess the query() and add() operations calculate something like partial sums or adjacent differences, but I don't understand how and why. Can someone explain? Thank you!
It's a binary indexed tree (also known as a Fenwick tree).
Can anyone help me find why my code for D is failing? Are there any edge cases I need to check for? It always fails on test case 69. Does anyone know what that test is? :( https://codeforces.net/problemset/submission/1579/130623552 I did as the editorial said and used a priority queue for choosing the two people with maximum sociability.
Consider the test case "2 2 2": the correct answer is 3 conversations, e.g. (1,2), (1,3), (2,3).
For D, will the following algorithm work: "choose two people $$$i$$$ and $$$j$$$ such that person $$$i$$$ has the maximum remaining sociability and $$$j$$$ is any other person with non-zero sociability, and stop when fewer than 2 people with non-zero sociability are left"?
In the first point of the problem D editorial, how is this true?
Graphical solution for problem F.