Idea: BledDest
Tutorial
Solution (BledDest)
t = int(input())
for i in range(t):
    n = int(input())
    a = list(map(int, input().split()))
    # positions of the leftmost and rightmost 1
    l, r = a.index(1), n - a[::-1].index(1) - 1
    c = a.count(1)
    # answer: number of zeros between the leftmost and rightmost 1
    print(r - l - c + 1)
Idea: BledDest
Tutorial
Solution (Neon)
#include <bits/stdc++.h>

using namespace std;

int main() {
    int t;
    cin >> t;
    while (t--) {
        int n, k;
        cin >> n >> k;
        vector<int> a(n), x(n);
        for (auto& it : a) cin >> it;
        for (auto& it : x) cin >> it;
        // s[d] = total health of monsters starting at distance d
        vector<long long> s(n + 1);
        for (int i = 0; i < n; ++i) s[abs(x[i])] += a[i];
        bool ok = true;
        long long lft = 0;  // bullets left over after dealing with all closer monsters
        for (int i = 1; i <= n; ++i) {
            // during the first i seconds we can fire k*i bullets in total,
            // and they must cover every monster that started at distance <= i
            lft += k - s[i];
            ok &= (lft >= 0);
        }
        cout << (ok ? "YES" : "NO") << '\n';
    }
}
Idea: Roms
Tutorial
Solution (Roms)
#include <bits/stdc++.h>

using namespace std;

const int N = 300043;

int t;
int n, m;
int a[N];
long long sum[N];   // prefix sums of a
int cnt1[N];        // prefix counts of elements equal to 1

int main() {
    ios_base::sync_with_stdio(false);
    cin >> t;
    for (int tc = 0; tc < t; ++tc) {
        cin >> n >> m;
        memset(sum, 0, sizeof(sum[0]) * (n + 5));
        memset(cnt1, 0, sizeof(cnt1[0]) * (n + 5));
        for (int i = 0; i < n; ++i) {
            cin >> a[i];
            sum[i + 1] = sum[i] + a[i];
            cnt1[i + 1] = cnt1[i] + (a[i] == 1);
        }
        for (int i = 0; i < m; ++i) {
            int l, r;
            cin >> l >> r;
            --l;
            int cur_cnt1 = cnt1[r] - cnt1[l];
            long long cur_sum = sum[r] - sum[l];
            // minimum possible sum of b is (segment length) + (number of ones);
            // segments of length 1 are always impossible
            if ((r - l) + cur_cnt1 <= cur_sum && r - l > 1)
                cout << "YES\n";
            else
                cout << "NO\n";
        }
    }
    return 0;
}
Idea: BledDest
Tutorial
Solution (Neon)
#include <bits/stdc++.h>

using namespace std;

int main() {
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        vector<int> a(n);
        for (auto& x : a) cin >> x;
        vector<int> ans(n, n);
        // two passes: the second one runs on the reversed array to handle the other side
        for (int z = 0; z < 2; ++z) {
            vector<long long> s(n + 1);
            for (int i = 0; i < n; ++i) s[i + 1] = s[i] + a[i];
            vector<int> p(n, -1);  // p[i] = largest j < i with a[j] != a[i]
            for (int i = 1; i < n; ++i) {
                int j = (z ? n - i - 1 : i);
                // binary search for the shortest window to the left whose total
                // exceeds a[i] and which contains at least two distinct sizes
                int l = 1, r = i;
                while (l <= r) {
                    int m = (l + r) / 2;
                    if (s[i] - s[i - m] > a[i] && p[i - 1] >= i - m) {
                        ans[j] = min(ans[j], m);
                        r = m - 1;
                    } else {
                        l = m + 1;
                    }
                }
                if (a[i - 1] > a[i]) ans[j] = 1;  // an adjacent bigger slime eats it directly
                p[i] = (a[i] == a[i - 1] ? p[i - 1] : i - 1);
            }
            reverse(a.begin(), a.end());
        }
        for (int i = 0; i < n; ++i)
            cout << (ans[i] == n ? -1 : ans[i]) << ' ';
        cout << '\n';
    }
}
Idea: BledDest
Tutorial
Solution (awoo)
#include <bits/stdc++.h>

#define forn(i, n) for (int i = 0; i < int(n); i++)

using namespace std;

int n;
vector<int> a;
vector<vector<int>> g;
long long ans;
// cnt[v][c] = vertices of color c in the subtree of v with no other vertex of color c on their path to v
vector<map<int, int>> cnt;

void dfs(int v, int p = -1) {
    // small-to-large: merge every child's map into the largest child's map
    int bst = -1;
    for (int u : g[v]) if (u != p) {
        dfs(u, v);
        if (bst == -1 || cnt[bst].size() < cnt[u].size())
            bst = u;
    }
    for (int u : g[v]) if (u != p && u != bst) {
        for (auto it : cnt[u]) {
            int x = it.first, y = it.second;
            // paths joining two different child subtrees through v,
            // valid only if v itself does not have that color
            if (x != a[v]) ans += cnt[bst][x] * 1ll * y;
            cnt[bst][x] += y;
        }
    }
    if (bst != -1) swap(cnt[bst], cnt[v]);
    ans += cnt[v][a[v]];  // paths ending at v itself
    cnt[v][a[v]] = 1;     // v blocks deeper vertices of the same color
}

int main() {
    cin.tie(0);
    ios::sync_with_stdio(false);
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        a.resize(n);
        forn(i, n) cin >> a[i];
        g.assign(n, {});
        forn(_, n - 1) {
            int v, u;
            cin >> v >> u;
            --v, --u;
            g[v].push_back(u);
            g[u].push_back(v);
        }
        ans = 0;
        cnt.assign(n, {});
        dfs(0);
        cout << ans << '\n';
    }
    return 0;
}
Idea: adedalic
Tutorial
Solution (adedalic)
#include <bits/stdc++.h>

using namespace std;

#define fore(i, l, r) for(int i = int(l); i < int(r); i++)
#define sz(a) int((a).size())
#define all(a) (a).begin(), (a).end()

typedef long long li;
typedef pair<int, int> pt;

const int INF = int(1e9);
const int MOD = int(1e9) + 7;

int add(int a, int b) {
    a += b;
    if (a >= MOD)
        a -= MOD;
    return a;
}

int mul(int a, int b) {
    return int(a * 1ll * b % MOD);
}

namespace SuffixArray {
    string s;
    vector< array<int, 2> > classes;

    vector<int> build(const string &bs) {
        s = bs;
        s += '$';
        vector<int> c(all(s)), ord(sz(s));
        iota(all(ord), 0);
        classes.resize(sz(s));
        for (int len = 1; len < 2 * sz(s); len <<= 1) {
            int half = len >> 1;
            fore (i, 0, sz(s))
                classes[i] = {c[i], c[(i + half) % sz(s)]};
            sort(all(ord), [&](int i, int j) {
                return classes[i] < classes[j];
            });
            c[ord[0]] = 0;
            fore (i, 1, sz(ord))
                c[ord[i]] = c[ord[i - 1]] + (classes[ord[i - 1]] != classes[ord[i]]);
        }
        c.pop_back();
        for (int &cc : c)
            cc--;
        return c;
    }
};

int n, k;
string s;

inline bool read() {
    if (!(cin >> n >> k))
        return false;
    cin >> s;
    return true;
}

string calcZero(string s) {
    int rem = k;
    int pos = 0;
    for (int i = sz(s) - 1; rem > 0 && i >= 0; i--) {
        if (s[i] == '1')
            continue;
        while (pos < sz(s) && s[pos] == '0')
            pos++;
        if (pos >= i)
            break;
        swap(s[pos], s[i]);
        rem--;
    }
    return s.substr(s.find('1'));
}

string calcOne(string s) {
    reverse(all(s));
    auto c = SuffixArray::build(s);
    int cntOnes = count(all(s), '1');
    array<int, 3> mn = { 2 * sz(s), INF, -1 };
    int u = 0, curOnes = 0;
    fore (i, 0, n) {
        while (u < sz(s) && (u - i < cntOnes || cntOnes - curOnes > k - 1)) {
            curOnes += s[u] == '1';
            u++;
        }
        if (u - i < cntOnes || cntOnes - curOnes > k - 1)
            break;
        array<int, 3> curAns = { u - i, c[i], i };
        mn = min(mn, curAns);
        curOnes -= s[i] == '1';
    }
    assert(mn[2] >= 0);
    string res = s.substr(mn[2], mn[0]);
    int toAdd = cntOnes - count(all(res), '1');
    for (int i = sz(res) - 1; toAdd > 0 && i >= 0; i--) {
        if (res[i] == '0') {
            res[i] = '1';
            toAdd--;
        }
    }
    return res;
}

inline void solve() {
    auto s1 = calcZero(s);
    auto s2 = calcOne(s);
    if (sz(s1) > sz(s2) || (sz(s1) == sz(s2) && s1 > s2))
        swap(s1, s2);
    int res = 0;
    for (char c : s1)
        res = add(mul(res, 2), c - '0');
    cout << res << endl;
}

int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
    int tt = clock();
#endif
    ios_base::sync_with_stdio(false);
    cin.tie(0), cout.tie(0);
    cout << fixed << setprecision(15);

    if (read()) {
        solve();
#ifdef _DEBUG
        cerr << "TIME = " << clock() - tt << endl;
        tt = clock();
#endif
    }
    return 0;
}
I think E has the same idea as this.
Problem C video editorial: YOUTUBE VIDEO LINK (audio: English)
Bro, in problem C why can't we just check the number of ones in the subarray: if the number of ones is greater than (r-l)/2, then it's not possible. Can you tell me why my solution fails? Please don't downvote me.
Test case :
1
6 1
1 1 1 1 4 4
1 6
You can choose array b as : 2 2 2 2 2 2
The main thing is: you have to place 1 (in array b) on the elements which are not equal to 1 (in array a), and after placing the 1s, the remaining sum should be distributed among the elements which are 1 in array a. I hope it is clear.
You can refer to the video solution for detailed explanation.
I got it
Bro, send your code as a spoiler or as a link.
Bro, why are there so many downvotes on my question?
Because of the way you wrote the code in your comment: the first few lines have a very large font size, and the rest are very small with no new lines for each statement. It's not readable and it takes up a lot of space. It's common practice to put your code in a spoiler.
E can be solved linearly.
edit : I lost some rating but I think the problems were very nice :)
It was a very beautiful solution.
Can you explain this solution a bit, like what is going on in the dfs call?
At any point in the dfs, we keep track of how many beautiful paths we can create if we encounter every given color (the array stores it). So when we enter a vertex in the dfs, we can increment the beautiful path counter by the value for the color in the array.
Then, on each subtree we explore from this new call to dfs, we just need to reset the value of the current color to 1, because it will block any path to previously visited vertices of the same color.
Finally, when leaving the dfs, we should increase the initial value of the current color by one, because there will be one more beautiful path if we encounter the same color again. Try drawing what happens, it helps.
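For reference, a minimal sketch of the linear approach described above (a reader's idea, not the editorial's small-to-large solution). It assumes 1-indexed vertices with colors in 1..n and uses plain recursion, so a very deep tree may need an iterative DFS:

#include <bits/stdc++.h>
using namespace std;

vector<int> a;              // a[v] = color of vertex v
vector<vector<int>> g;      // adjacency list
vector<long long> cnt;      // cnt[c] = visited vertices of color c that can still start a path ending here
long long ans;

void dfs(int v, int p) {
    ans += cnt[a[v]];       // v closes a beautiful path with each such vertex
    long long saved = cnt[a[v]];
    for (int u : g[v]) if (u != p) {
        cnt[a[v]] = 1;      // inside each subtree, v blocks every same-colored vertex above it
        dfs(u, v);
    }
    cnt[a[v]] = saved + 1;  // after leaving the subtree of v, v itself becomes available
}

int main() {
    int t;
    scanf("%d", &t);
    while (t--) {
        int n;
        scanf("%d", &n);
        a.assign(n + 1, 0);
        for (int i = 1; i <= n; ++i) scanf("%d", &a[i]);
        g.assign(n + 1, {});
        for (int i = 0; i < n - 1; ++i) {
            int u, v;
            scanf("%d %d", &u, &v);
            g[u].push_back(v);
            g[v].push_back(u);
        }
        cnt.assign(n + 1, 0);
        ans = 0;
        dfs(1, 0);
        printf("%lld\n", ans);
    }
}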
Thank you for the explanation. Indeed a very beautiful solution.
Can you explain it a little more, please! Especially the 3rd paragraph!
Outstanding solution, thank you for sharing!
Wow, exactly like my solution: 248115690. The human brain is mysterious!
So elegant!
Wow, I just saw a solution to E which takes $$$O(n)$$$ time. It uses a single DFS and calculates the result for all colors simultaneously, the same way we would find the result for one color.
I solved E with centroid decomposition (which is $$$O(n \log n)$$$) but got TLE: 247977712.
But the editorial says the authors were expecting $$$O(n \log^2 n)$$$ solutions. I'm confused.
Are you sure it is $$$O(n \log n)$$$? It can't pass even if you increase the time limit to $$$15$$$ seconds.
I checked it again; I think everything I do in the function CD is linear. $$$T(n) = 2T(\frac{n}{2}) + O(12n)$$$ (worst case of the recurrence) implies that my solution has a time complexity of $$$O(n \log n)$$$ (Master theorem). Maybe I implemented something wrong (or learnt centroid decomposition or the Master theorem in the wrong way?).
Btw, how did you know my code would TLE even if the time limit was 15 seconds?
You can create a mashup and increase the TL to 15 seconds. I don't know centroid decomposition lol :(
I checked again. It runs for $$$8$$$ seconds on a test where $$$t = 1, n = 20000, a[i] = 1$$$ and the graph edges are $$$(1, 2), (2, 3), (3, 4)$$$, .... So I think it's $$$O(n^2)$$$.
Yes, you are probably right. My solution should indeed be $$$O(n \log n)$$$; however, the way I find centroids was wrong, causing my solution to have $$$O(n^2)$$$ complexity. I fixed that and got AC.
Thanks a lot!
Hello Codeforces! Please explain what virtual trees are (they are used in the second solution of problem E).
https://codeforces.net/blog/entry/76955
Need help. Why does this solution give TLE for problem 1923E - Count Paths while the one given in the editorial passes?
248160733
It's probably because you are merging everything into the parent instead of merging the smaller children into the largest child. Merging everything means more merging work, which takes more time.
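As a rough illustration of that idea (a generic hedged sketch with a hypothetical helper, not code from either submission): always pour the smaller map into the larger one, so each element is moved at most O(log n) times overall.

#include <bits/stdc++.h>
using namespace std;

// Merge `from` into `into`, always iterating over the smaller map.
void mergeSmallToLarge(map<int, int>& into, map<int, int>& from) {
    if (into.size() < from.size()) swap(into, from);  // swapping std::map is O(1)
    for (auto [color, c] : from) into[color] += c;
    from.clear();
}

int main() {
    map<int, int> a{{1, 2}, {3, 1}}, b{{1, 1}};
    mergeSmallToLarge(a, b);
    for (auto [color, c] : a) cout << color << ": " << c << '\n';  // prints "1: 3" and "3: 1"
}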
Can someone please tell me on which case this is failing?
void solve() {
}
Bro, send your code as a link or as a spoiler.
case 1 1 1 3 => expected NO, you answer YES
case 1 1 2 2 => expected YES, you answer YES
I had an idea that works in O(n):
Check this code:
https://codeforces.net/contest/1923/submission/247999826
I have solved E linearly. Here is the solution.
Here ans[c] means how many visited nodes of color c are available to be the first vertex of a path.
When we are going forward, we can only pair 1 node of that color with a new node, and when we go backward, we get 1 extra node of that color to pair with new nodes.
That's so elegant!
Lovely solution! very elegant. I solved it in O(n) as well, but not nearly as beautiful. 248719267
In problem B, why is it always optimal to hit the nearest monster? There could be another monster far away with much, much greater health, and if certain bullets were not spent on it within the first seconds, it would definitely reach the origin.
If that's the case, then you are doomed anyway, since if you don't kill the closer monster first, it will kill you before the farther monster reaches you.
I would love to know if someone can tell me why my solution to problem C is so slow (2308 ms)
I think it's because you used endl instead of '\n' at the end of each output line. I don't quite know how this works exactly, but endl does an operation called "flushing" after it outputs, while '\n' doesn't. You might also want to add the following to improve your IO speed (this will only work with '\n'; endl will still slow your IO down even if you add this code):
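(The exact snippet did not survive here; presumably it is the usual fast-IO setup, the same two lines the editorial solutions above use.)

#include <bits/stdc++.h>
using namespace std;

int main() {
    // assumed fast-IO setup: detach C++ streams from C stdio and untie cin from cout
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);
    // ... rest of the solution, printing '\n' instead of endl ...
}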
Here is my submission with endl and this code (1185 ms): submission
Here is my submission with '\n' and this code (296 ms): submission
For further information on how this works read this USACO guide article: Link
Thank you for your comment, but I have defined endl as "\n"! I'll add those 2 lines, thanks!
Yes, that's correct. I also got the time reduced from 982 ms to 186 ms just by writing those two lines...
The third case in the tutorial for C: "If this sum is greater than ..."
Change 'greater' to 'smaller'.
You can also use divide and conquer on the tree to solve problem E in $$$O(n \log n)$$$. It is easy to come up with.
Yeah, just use centroid decomposition. But I used an unordered set and could get it accepted. How can you do it in $$$O(n \log n)$$$?
If someone wants to consult a centroid decomposition + set solution, they can see my submission: 248033502. My idea: I count the number of paths crossing the centroid node for every node, and use a set to keep track of the first node of color C along the DFS path.
I am unable to understand why the solution to problem E has time complexity $$$O(n \log^2 n)$$$. For every node, we are iterating through all the children, and then for every child we are iterating through every color it has. Can someone explain it to me?
It's a type of "DSU merging" (small-to-large). When a vertex's color set is iterated, the destination set size must be at least twice that of the original one, so each node is moved at most $$$O(\log n)$$$ times, and the total number of moves is at most $$$O(n \log n)$$$. With the complexity of map operations on top, the final complexity is $$$O(n \log^2 n)$$$.
Thanks! Btw, I found this if anyone is looking for a detailed explanation.
I think there are some mistakes in the tutorial for problem 1923C — Find B:
To answer query $$$l, r$$$, at first let's try to find the array $$$b$$$ with the minimum value of $$$\sum b_i$$$. For this, for indexes $$$i$$$ where $$$c_i = 1$$$ we have to set $$$b_i = 2$$$, and for indexes $$$i$$$ where $$$c_i > i$$$ we have to set $$$b_i = 1$$$. Thus, the sum of all elements in array $$$b$$$ is equal to $$$cnt_1 \cdot 2 + (r - l + 1) - cnt_1$$$, or $$$(r - l + 1) + cnt_1$$$. Now we have three cases:
If this sum is greater than $$$\sum_{i=l}^{r} c_i$$$ then the answer is NO.
If this sum is equal to $$$\sum_{i=l}^{r} c_i$$$ then the answer is YES.
If this sum is greater than $$$\sum_{i=l}^{r} c_i$$$ then we can add this excess to the maximal element in array $$$b$$$. In this case, the answer is YES.
Instead of $$$c_i > i$$$, shouldn't it be $$$c_i > 1$$$?
In the third case, shouldn't it be 'smaller' instead of 'greater'?
My solution for C got hacked. It seems some Python solutions are just taking more time. Can somebody explain?
Bro, send your code as a link or as a spoiler.
About F's Fact 4: the two binary strings should have the same number of '1's. Is that true?
If $$$s1=s2$$$, there will be the same number of $$$1$$$'s. Otherwise, $$$s1<s2$$$ or $$$s2<s1$$$, and they don't have the same number of $$$1$$$'s. The same relationship will hold between $$$s1'$$$ and $$$s2'$$$, since the fact mentions that we are replacing the same number of $$$0$$$'s with $$$1$$$'s in both strings.
For this problem though, $$$s1'$$$ and $$$s2'$$$ will always contain the same number of $$$1$$$'s. But this condition is not necessary for $$$s1$$$ and $$$s2$$$.
Thanks!
I solved E using a stack for each color.
Here is my solution: 248216745
Can someone tell me why my solution for B is not working?
My Submission
Approach: stored the sorted vector (distance, hp) in vector p. Then iterated through vector p and, for each monster, calculated the time it would need to be killed by the formula hp/k. Also calculated the bullets left in extra; at the start of each iteration, subtracted the extra bullets.
D was a really good question. Great contest!
248285844 Problem D: What's wrong with my code? Please help. I just don't get why I am getting a wrong answer.
Explaining Tourist's Solution to Problem D
Objective: for each slime, find the minimum number of operations needed to eat that slime. One operation is one slime being eaten by an adjacent bigger slime.
Some observations (for the non-trivial cases):
- If there is a consecutive set of equal-sized slimes, they can't do any operation on each other.
- Any merging operations to make a bigger slime are made either to the LEFT or to the RIGHT, not including the index of the slime itself.
- Looking at one side (left or right), there must be at least 2 different-sized slimes on that side.
- Since eventually the adjacent slime needs to get bigger than the current slime, we need to look for the minimum number of slimes to include on one side whose prefix sum > current slime size, with the number of distinct slimes > 1.
- Since the prefix sum is monotonous over a contiguous range, we can perform binary search on both sides to find the minimum number of slimes to include to satisfy the condition.
Explanation of setting limits for the binary search: for the LEFT side, keep the lo limit at 0, and the hi limit at the largest index where the slime size is not equal to that of the slime to the immediate left of the current slime, because we need to ensure at least 2 different-sized slimes are included in the range.
Pre-processing:
- l(n) and r(n): store the begin and end index of the contiguous equal-sized slime windows.
- p(n+1): prefix sum array; its monotonicity property lets us use binary search.
Overall time complexity: O(n log n)
- O(n) time for preprocessing.
- O(n log n) time for the core implementation: log n time to binary search to the LEFT and to the RIGHT of the current index.

248285844 Problem D: What's wrong with my code? Please, someone help. I just don't get why I am getting a wrong answer.
Try checking the ok condition before starting the binary search. Sorry, you have already done it, I just saw it.
I found the mistake. I was using return type 'int' for 'summ', but shouldn't it give integer overflow for this?
For Problem E, I came up with this simple solution.
Nice solution, you can replace your map with an array for a better complexity.
Oh yes! Thanks!
Would you like to explain ?
Whenever you visit a node s, a[s] is the value of that node. m[a[s]] keeps count of how many previously visited nodes with value a[s] can still form a pair with it, meaning no other node with value a[s] comes in the way.
Suppose you visit s; then for a node lying in its subtree there is no way to form a pair with anything other than s, because s comes in between it and any other node with the same value. So we set m[a[s]] = 1.
While coming out of the highest-level s, we increase the count for that value by 1 for the next s.
Can we solve C using a sparse table?
Can anybody tell me why, for problem 2, this code is giving an error on test case 2? https://codeforces.net/contest/1923/submission/248233287
For problem D: can anyone tell why this code is giving a wrong answer? 248345962
Can someone explain why jiangly's solution to problem E works in O(N log^2 N)? I know it utilizes small-to-large merging, but why does merging by the size of the color sets yield that complexity? When we merge some node's children, the set size won't necessarily go up 2x. For example, combining the sets (1,2,3) and (1,2,3) will give us (1,2,3) again, so the regular proof does not apply here.
Edit: Nvm, I neglected the fact that the number of colors is always less than or equal to the number of nodes in a subtree. The proof is trivial.
How does one come up with solutions like that of problem C? :/
The goal is to make the elements different from the original, whilst not changing the sum.
An easy thing to do is to decrement one number and increment another. By doing this, we've changed both numbers from the original whilst still keeping the same sum.
Now obviously, we cannot decrement a 1, because the third rule states any number must be > 0. Therefore, we are forced to increment all 1s, and forced to decrement all numbers > 1.
It's easy to see that this is only possible if the total amount the numbers > 1 can be decremented by is not less than the total amount the 1s need to be incremented by.
We can then, of course, answer the range queries using prefix/suffix sums; a minimal sketch of the check follows below.
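A hedged sketch of that per-query check (goodSegment is a hypothetical helper; the 1-indexed prefix sums are built the same way as in the editorial code above):

#include <bits/stdc++.h>
using namespace std;

// sum[i] = a[1] + ... + a[i], cnt1[i] = number of ones among a[1..i]
bool goodSegment(const vector<long long>& sum, const vector<int>& cnt1, int l, int r) {
    if (l == r) return false;                   // a single element cannot change while keeping its sum
    long long segSum  = sum[r] - sum[l - 1];
    long long ones    = cnt1[r] - cnt1[l - 1];
    long long minSumB = (r - l + 1) + ones;     // every 1 must become at least 2, everything else at least 1
    return minSumB <= segSum;
}

int main() {
    vector<int> a = {1, 1, 1, 1, 4, 4};         // the test case discussed earlier in the comments
    int n = a.size();
    vector<long long> sum(n + 1, 0);
    vector<int> cnt1(n + 1, 0);
    for (int i = 1; i <= n; ++i) {
        sum[i]  = sum[i - 1] + a[i - 1];
        cnt1[i] = cnt1[i - 1] + (a[i - 1] == 1);
    }
    cout << (goodSegment(sum, cnt1, 1, 6) ? "YES" : "NO") << '\n';  // YES, e.g. b = 2 2 2 2 2 2
}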
I do not understand why my submission does not pass the second test case and what I am doing wrong here. Can someone please explain? 248388078
I made a video editorial for E discussing the DP idea (excluding the small-to-large trick).
I also made a contest with an easy version of problem E and some follow-up problems. Access it here.
Thanks
Problem F's "Fact 4",You should specify that s'1 and s'2 have the same number of 1s , otherwise your statement is wrong.
s1=1000011 s2=1000100 k-1=2 s'1=1001111 s'2=1000111 s1 < s2 and s'1 > s'2
Done
For E with Virtual Trees: I think you can just do a modified Euler walk of the tree first, and then walk only over the nodes of the same color in the order of the Euler walk; it will be equivalent to doing a DFS on the virtual tree. So it should be easy to do the DP in each node for all its top-level children, with/without being a direct descendant of the parent.
The final complexity should be just O(n), better than the O(n log n) of the tutorial.
The modification of the walk is that you record the node each time you visit it, not only the first and last time.
Can someone give me a test case where this solution (https://codeforces.net/contest/1923/submission/248724413) fails?
In problem D, my code got a "Wrong Answer" on test 2. Can anyone give me a hack? Many thanks.
https://codeforces.net/contest/1923/submission/248850524
Problem C: what's the mistake in this code? Please help. https://codeforces.net/contest/1923/submission/249311392
Is there a typo in the editorial for problem C? Should it be "and for indexes $$$i$$$, where $$$c_i > 1$$$" instead of $$$c_i > i$$$? That makes more sense to me.
How can we do B using DP?