Idea: fcspartakm
Tutorial
Tutorial is loading...
Solution 1 (awoo)
# Count "beautiful" 4-digit displays (leading zeros allowed): strings built
# from exactly two distinct digits, each occurring twice, avoiding the broken
# digits listed for the test case.
for _ in range(int(input())):
    n = int(input())
    broken = set(map(int, input().split()))
    total = 0
    for value in range(10000):
        code = str(value).zfill(4)
        distinct = set(code)
        if len(distinct) != 2:
            continue
        first, second = (int(ch) for ch in distinct)
        if first not in broken and second not in broken and code.count(code[0]) == 2:
            total += 1
    print(total)
Solution 2 (fcspartakm)
#include <bits/stdc++.h>
using namespace std;
int n;

// Reads one test case: n followed by n integers. Only n matters for the
// answer computed later, so the listed values are read and discarded.
inline void read() {
    cin >> n;
    int discard;
    for (int idx = 0; idx < n; ++idx)
        cin >> discard;
}
// n! computed iteratively; safe in int for the small arguments used here
// (n <= 10).
inline int fac(int n) {
    int product = 1;
    for (int factor = n; factor >= 2; --factor)
        product *= factor;
    return product;
}

// Binomial coefficient C(n, k) = n! / k! / (n - k)!. Each division is exact,
// so the chained integer divisions are safe.
inline int c(int n, int k) {
    int result = fac(n);
    result /= fac(k);
    result /= fac(n - k);
    return result;
}
// With n digits excluded, choose 2 of the remaining 10 - n digits, and pick
// 2 of the 4 positions for one of them: C(10-n, 2) * C(4, 2) results.
inline void solve() {
    int digit_pairs = c(10 - n, 2);
    int placements = c(4, 2);
    cout << digit_pairs * placements << endl;
}
// Entry point: reads the number of test cases and processes each one.
int main () {
    int t;
    cin >> t;
    for (int tc = 0; tc < t; ++tc) {
        read();
        solve();
    }
}
Idea: BledDest
Tutorial
Tutorial is loading...
Solution (BledDest)
#include <bits/stdc++.h>
using namespace std;
// Prints the permutation 1, n, n-1, ..., 2. Any candidate subsegment of
// length >= 2 would need to contain both 1 and 2, which sit at the two ends,
// so only the whole array qualifies.
void solve()
{
    int n;
    cin >> n;
    cout << 1;
    for (int value = n; value > 1; value--)
        cout << " " << value;
    cout << endl;
}
// Entry point: runs solve() once per test case.
int main()
{
    int t;
    cin >> t;
    while (t-- > 0)
        solve();
}
Idea: fcspartakm
Tutorial
Tutorial is loading...
Solution (awoo)
# For each maximal run of lidded boxes (extended one box to the left, where
# the first lid can be moved), everything except the cheapest box in the run
# is saved. A sentinel '0' box with value 0 is prepended to simplify the scan.
for _ in range(int(input())):
    n = int(input())
    lids = '0' + input()
    vals = [0] + list(map(int, input().split()))
    saved = 0
    start = 0
    while start <= n:
        block_sum = vals[start]
        block_min = vals[start]
        end = start + 1
        while end <= n and lids[end] == '1':
            block_sum += vals[end]
            block_min = min(block_min, vals[end])
            end += 1
        saved += block_sum - block_min
        start = end
    print(saved)
1743D - Problem with Random Tests
Idea: BledDest
Tutorial
Tutorial is loading...
Solution (BledDest)
#include<bits/stdc++.h>
using namespace std;
char buf[1000043]; // raw scanf("%s") buffer for the input string; copied into a std::string in main()
// Drops leading zeroes from a binary string; an empty or all-zero input
// collapses to "0".
string normalize(const string& v)
{
    size_t first = 0;
    while (first < v.size() && v[first] == '0')
        first++;
    return first == v.size() ? string("0") : v.substr(first);
}

// OR of two binary strings written most-significant-bit first; the shorter
// operand is implicitly padded with zeroes on the left. The result carries
// no leading zeroes.
string operator |(const string& a, const string& b)
{
    size_t width = max(a.size(), b.size());
    string merged(width, '0');
    for (size_t back = 0; back < width; back++)
    {
        bool bit_a = back < a.size() && a[a.size() - 1 - back] == '1';
        bool bit_b = back < b.size() && b[b.size() - 1 - back] == '1';
        if (bit_a || bit_b)
            merged[width - 1 - back] = '1';
    }
    return normalize(merged);
}
// Numeric comparison of two normalized binary strings: a longer string is
// always larger; equal lengths fall back to lexicographic order.
bool better(const string& a, const string& b)
{
    if (a.size() == b.size())
        return a > b;
    return a.size() > b.size();
}
// Maximizes s | s2 over substrings s2 of s. Only prefixes of the suffix
// starting at the first '1' are tried, progressively trimmed on the right so
// the OR (which is right-aligned) is effectively shifted. On random tests the
// leading block of ones is short with high probability, so few shifts matter.
int main()
{
int n;
scanf("%d", &n);
string s;
scanf("%s", buf);
s = buf;
// Baseline: s | s is just s without leading zeroes.
string ans = s | s;
// Position of the most significant '1'; none means s is all zeroes.
int pos1 = s.find("1"); // NOTE(review): find() returns size_t; int works here because -1 converts back to npos in the comparison below, but size_t would be cleaner.
if(pos1 != string::npos)
{
// First '0' after that '1'; if s has no such '0', no shift can improve.
int pos2 = s.find("0", pos1);
if(pos2 != string::npos)
{
int cur = pos1;
int not_needed = 0;
while(true)
{
// Stop once cur leaves the string or reaches a '1' past pos2 — further
// shifts cannot improve the answer.
if(cur == n || (s[cur] == '1' && cur > pos2)) break;
// OR s with the suffix starting at pos1, with `not_needed` characters
// cut from its right end (i.e. shifted right by not_needed positions).
string nw = s | s.substr(pos1, n - pos1 - not_needed);
if(better(nw, ans)) ans = nw;
cur++;
not_needed++;
}
}
}
puts(ans.c_str());
}
Idea: BledDest
Tutorial
Tutorial is loading...
Solution (awoo)
#include <bits/stdc++.h>
#define forn(i, n) for (int i = 0; i < int(n); i++)
using namespace std;
const long long INF64 = 1e18;
// Problem E (FTL). dp[i] = minimum time needed to deal i damage such that the
// last shot fired was a double shot; between double shots we fix which laser
// k fires j single shots while the other laser fires whenever it reloads.
// (Interpretation per the editorial discussion — confirm against it.)
int main() {
vector<int> ps(2);       // laser powers p1, p2
vector<long long> ts(2); // laser reload times t1, t2
int h, s;                // ship durability h and shield strength s
forn(i, 2) scanf("%d%lld", &ps[i], &ts[i]);
scanf("%d%d", &h, &s);
long long ans = INF64;
vector<long long> dp(h + 1, INF64);
dp[0] = 0; // zero damage takes zero time
forn(i, h) for (int j = 1; j <= h - i; ++j) forn(k, 2){
// Damage after j single shots of laser k plus as many shots of laser k^1
// as fit in time j * ts[k]; a single shot deals (power - shield) damage.
int ni = min((long long)h, i + j * (ps[k] - s) + j * ts[k] / ts[k ^ 1] * (ps[k ^ 1] - s));
if (ni == h)
ans = min(ans, dp[i] + j * ts[k]);
if (j * ts[k] >= ts[k ^ 1]){
// Variant ending in a double shot: the final shot of each laser is merged
// into one double shot dealing ps[0] + ps[1] - s through the shield.
ni = min((long long)h, i + (j - 1) * (ps[k] - s) + (j * ts[k] / ts[k ^ 1] - 1) * (ps[k ^ 1] - s) + (ps[0] + ps[1] - s));
dp[ni] = min(dp[ni], dp[i] + j * ts[k]);
}
}
ans = min(ans, dp[h]);
printf("%lld\n", ans);
}
1743F - Intersection and Union
Idea: BledDest
Tutorial
Tutorial is loading...
Solution (BledDest)
#include<bits/stdc++.h>
using namespace std;
const int N = 300043;
typedef array<int, 2> vec;
typedef array<vec, 2> mat;
const int MOD = 998244353;

// Product of two 2x2 matrices with entries reduced modulo MOD.
mat operator*(const mat& a, const mat& b)
{
    mat prod = {vec({0, 0}), vec({0, 0})};
    for (int row = 0; row < 2; row++)
        for (int col = 0; col < 2; col++)
        {
            long long acc = 0;
            for (int mid = 0; mid < 2; mid++)
                acc += a[row][mid] * 1ll * b[mid][col];
            prod[row][col] = int(acc % MOD);
        }
    return prod;
}

// Leaf transition matrices: main() installs ZERO when the current value lies
// outside the leaf's segment and ONE when it lies inside.
mat ZERO = {vec({3, 0}), vec({1, 2})};
mat ONE = {vec({1, 2}), vec({1, 2})};
// Segment tree storage; node v has children 2v + 1 and 2v + 2.
mat t[4 * N];

// Recomputes inner node v as the product of its two children.
void recalc(int v)
{
    int left = v * 2 + 1;
    int right = v * 2 + 2;
    t[v] = t[left] * t[right];
}
// Builds the tree over the leaf interval [l, r); every leaf starts as ZERO,
// and each inner node holds the product of its children.
void build(int v, int l, int r)
{
    if (l + 1 == r)
    {
        t[v] = ZERO;
        return;
    }
    int mid = l + (r - l) / 2;
    build(v * 2 + 1, l, mid);
    build(v * 2 + 2, mid, r);
    recalc(v);
}
// Sets the leaf at position pos to ZERO (val == 0) or ONE (otherwise) and
// refreshes the matrix products on the path back to the root.
void upd(int v, int l, int r, int pos, int val)
{
    if (l + 1 == r)
    {
        t[v] = (val == 0) ? ZERO : ONE;
        return;
    }
    int mid = l + (r - l) / 2;
    if (pos < mid)
        upd(v * 2 + 1, l, mid, pos, val);
    else
        upd(v * 2 + 2, mid, r, pos, val);
    recalc(v);
}
// Problem F: sums, over every value x in [0, 300000], the number of operation
// sequences whose final set contains x. One 2x2 transition matrix per
// operation lives in the segment tree (segments 2..n, hence n - 1 leaves);
// `cur` tracks whether x lies in the first segment.
int main()
{
ios_base::sync_with_stdio(0);
cin.tie(0);
int n;
cin >> n;
// v[i]: scanline events at coordinate i — (1, idx) when segment idx starts,
// (0, idx) one position past its right end.
vector<vector<pair<int, int>>> v(N);
for(int i = 0; i < n; i++)
{
int l, r;
cin >> l >> r;
v[l].push_back(make_pair(1, i));
v[r + 1].push_back(make_pair(0, i));
}
build(0, 0, n - 1); // NOTE(review): n - 1 leaves — assumes n >= 2
int cur = 0; // is x inside the first segment?
int ans = 0;
for(int i = 0; i <= 300000; i++)
{
// Apply this coordinate's events before counting the contribution of x = i.
for(auto x : v[i])
{
if(x.second == 0) cur = x.first; // segment 0 toggles `cur` directly
else upd(0, 0, n - 1, x.second - 1, x.first); // segment idx lives at leaf idx - 1
}
ans = (ans + t[0][cur][1]) % MOD;
}
cout << ans << endl;
}
Idea: BledDest
Tutorial
Tutorial is loading...
Solution (BledDest)
#include<bits/stdc++.h>
using namespace std;
const int MOD = 998244353;

// (x + y) mod MOD; the adjustment loops also repair out-of-range inputs such
// as x + MOD coming from sub().
int add(int x, int y)
{
    int total = x + y;
    while (total >= MOD) total -= MOD;
    while (total < 0) total += MOD;
    return total;
}

// (x - y) mod MOD, with the result normalized into [0, MOD).
int sub(int x, int y)
{
    return add(x, MOD - y);
}
// Digit the current match state is waiting for: 0 when bit 1 of the state
// mask is set, 1 otherwise.
int expected(int mask)
{
    return (mask & 2) ? 0 : 1;
}
// Value of the lowest set bit of x (two's-complement isolation trick),
// or -1 when x is zero.
int last_bit(int x)
{
    if (x == 0) return -1;
    return x & (-x);
}
// Advances one match state `a` by the input digit x.
// Returns false (and resets the state to 1 << x) when x is not the digit the
// state expects; otherwise toggles bit x and repeatedly collapses the two
// lowest set bits b and 2b into the single bit 4b (a += b causes the carry).
// NOTE(review): presumably this models how adjacent Fibonacci strings
// concatenate into the next one — confirm against the editorial.
bool go(int& a, int x)
{
if(expected(a) != x)
{
// Mismatch: restart matching from this digit alone.
a = 1 << x;
return false;
}
a ^= (1 << x);
while(true)
{
int b = last_bit(a);      // lowest set bit
int c = last_bit(a - b);  // second lowest set bit (-1 if none)
if(c == 2 * b) a += b;    // b + 2b merge: carry produces bit 4b
else break;
}
return true;
}
// True when a consists of a single set bit (a power of two); false for a == 0
// because last_bit(0) returns -1. NOTE(review): in main() such states trigger
// a subtraction — presumably a completed Fibonacci-string occurrence.
bool is_fib(int a)
{
return a == last_bit(a);
}
// Advances every tracked (cost, state) pair by input digit x and keeps only
// the pairs whose match survives; the others are dropped.
vector<pair<int, int>> go(const vector<pair<int, int>>& a, int x)
{
    vector<pair<int, int>> kept;
    for (const auto& entry : a)
    {
        int state = entry.second;
        if (go(state, x))
            kept.push_back(make_pair(entry.first, state));
    }
    return kept;
}
// Problem G: processes the digits of all n pieces as one continuous stream.
// `last` is the dp value for the current prefix, `sum` the running sum of all
// dp values so far; `w` holds the live partial Fibonacci-string matches as
// (dp value at the match's start, match-state mask) pairs.
int main()
{
ios_base::sync_with_stdio(0);
cin.tie(0);
int last = 1, sum = 1; // dp of the empty prefix and its running sum
vector<pair<int, int>> w;
int n;
cin >> n;
for(int i = 0; i < n; i++)
{
string s;
cin >> s;
for(auto x : s)
{
int c = x - '0';
// NOTE(review): ndp starts as the sum of dp values of the older prefixes
// (sum minus the most recent); invalid cuts are subtracted below —
// confirm the exact recurrence against the editorial.
int ndp = sub(sum, last);
w = go(w, c);
// Remove cuts whose final block would be a complete Fibonacci string.
for(int j = 0; j < w.size(); j++)
{
if(is_fib(w[j].second))
ndp = sub(ndp, w[j].first);
}
// A '1' can start a new Fibonacci match (initial state mask 2).
if(c == 1) w.push_back(make_pair(last, 2));
sum = add(sum, ndp);
last = ndp;
// Only a logarithmic number of match states stay alive simultaneously.
assert(w.size() <= 60);
}
// One answer line after each input piece.
cout << last << endl;
}
}
Thanks for this round — I'm Master again!!!
Most problems are interesting and educational!!!
Agree!
Great problems and nice editorial!
Gready-Bruteforce Solution 176908398
There is an easier solution in F. For fixed integer x lets calculate dp[i] as amount of sets that include x formed with first i segments. Transition:
x < l[i] or r[i] < x => dp[i] = dp[i — 1] * 2 (last operation is: union => dp[i — 1], intersection => 0, difference => dp[i — 1])
l[i] <= x <= r[i] => dp[i] = 3 ^ (i — 2) * 2 (last operation is: union => 3 ^ (i — 2), intersection => dp[i — 1], difference => 3 ^ (i — 2) — dp[i — 1])
Let ind[x] be index of the last segment x belongs to. dp[n] for x can be calculated for O(1) as 3 ^ (ind[x] — 1) * 2 ^ (n — ind[x]). ind[x] is calculated for O((n + M)logn) using scanline. My solution: 176763501
I did something similar (after the contest) but didn't use dp at all.
I iterated through all values up to 3*10^5 keeping track of all currently active sets (sets that go through that point) and then for each point I would, with a heap, find the index of the set with the highest index and apply the same formula as you (3 ^ (ind[x] — 1) * 2 ^ (n — ind[x])).
My solution: 998244353 *176949836 (oops)
(The heap implementation isn't the best and also it's a min heap, when I actually needed a max heap, but I was to lazy to get a max heap, so instead I just did some changes in other places of the code so that it worked with a min heap)
When I just saw your submission link I thought "wait is that submission no. really that famous prime number?" and realized its just a link that looks like a submission markdown LOL
Haha, hadn't realized, I copied the wrong number
Print the maximum possible value you can get in binary representation without leading zeroes.
BledDest
This is not the answer that your code gives in some tests.
This is same as: "I only want people with similar solution to mine pass and the rest fail".
https://codeforces.net/blog/entry/108148
Does it mean that in any question, I can write a solution which works lets say 0.78657 of time ?
By saying that, you're undermining the usefulness of any non-deterministic/approximation algorithm.
Yes, Im. But im saying that only in CP. Bcz here if your solutions get unlucky 1 times out of 40 and you print wrong answer, and judge doesnt care about your solution and say "its WA, its completely wrong"
You might think scoring on a per-TC factor might be a solution. But trust me, this isn't going to help. I've seen one competition score participants on a per-TC factor, turned out everyone snatched 50/100 score on a problem where the output is a single line of Yes/No.
I'm not sure what you mean by this. The editorial solution (and most, if not all, of the AC solutions, I'm pretty sure) is 100% deterministic and is guaranteed to give the correct answer (which is always unique) 100% of the time. Any solution that finds the correct answer in $$$O(n)$$$ expected runtime would pass, even if the approach is very different from the editorial solution. The output will always be the absolute maximum possible value (in binary, with no leading 0s) no matter what.
Now, it is possible to get an AC with a solution that doesn't guarantee the correct answer, but (a) that's not what the editorial solution is doing, and (b) it should still be accepted in the context of the problem design. Here is an example of an elegant 5-line solution (not mine) that looks nothing like the editorial solution, but it still definitely deserves its AC with respect to the problem design imo: 176783281
Show me any such test.
By saying that, I meant a solution can get very unlucky, and fail(work too slow, etc (Basically not getting AC)). Like you mentioned in editorial, its rare that 20 ones come repeatedly(as I see your solution depends on this fact), But what if generator gives such a case that your solution works slow.
I am sure that any reasonable implementation of the solution can handle a block of size up to $$$100$$$ ones, not $$$20$$$. The probability of the first block being longer than $$$100$$$ is about $$$2^{-100}$$$. A meteor hitting Earth and imploding CF servers is more probable to make the solution get rejected than this thing.
I think it's more likely that somebody would hack into the Codeforces system undetected and modify the tests to make incorrect solutions pass and/or correct solutions fail. If you want to be intellectually consistent, why don't you start screaming about how we should not have automated testing anymore? Please volunteer to manually scan every character of every test in every submission for this contest to help ensure its absolute integrity.
Nah, the chance that he will misjudge one half of all submissions is absolutely higher than the chance that a reasonable solution will fail the tests on the problem.
The fact that you think this just means you don't understand the point of probabilistic approaches, and more to that point, of CP itself.
Plus I'm pretty sure your contribution agrees with me
I spent the entirety of the round trying to solve D with XOR instead of OR -> -108
One of my most embarrassing moments ever in a Codeforces contest. And here I thought I had already taught myself well enough not to rush through statements. Such a simple problem, had I just read it right :/
Thanks for the comment, for last half an hour I was also thinking of xor. At first i thought you did a typing mistake an replace xor with or lol.
My approach for B was simply to have 1 and 2 at the two opposite endpoints. Any permutation of size $$$\geq 2$$$ needs both 1 and 2, but now the only way to include both would be to take the entire array.
Problem D is the first time I encountered a Codeforces problem where the runtime analysis depended on input distribution. I like this idea, and would want to see more of this kind, since I think it's quite a refreshing change from the usual scenario of considering only the worst-case, and this expected runtime analysis is also very relevant for practical applications.
I really like Problem E; very simple to understand, no unnecessarily complicated factors in the description, yet creative enough to require an interesting approach (imo) while the natural greedy ideas that would first come to mind end up failing.
I also like that Problem A is actually easy enough to hopefully not invoke ragequits and/or demotivate participants from their struggles with it, unlike some of the other recent contests.
Overall, great contest! Thank you!
In problem D, if we have the first '0' around the 10^5-th or 10^6-th position, the block of '1's becomes huge and we run into O(n^2). How is it possible that "the probability that its length is 20 or bigger is about $$$10^{-6}$$$, so the expected number of prefixes we need to check is also O(1). Thus, the expected runtime of our solution is O(n)"?
Please explain
Let's ignore the leading 0s, and consider the first 1. What is the probability that there are no 0s in the next 20 indices? Well, the probability that the next index is non-zero is $$$1/2$$$, and then the probability that the index after that is non-zero is also $$$1/2$$$, and so on. So the probability that these 20 indices in a row are all non-zero is $$$\underbrace{1/2 \times 1/2 \times \cdots \times 1/2}_{20 \text{ terms}} = (1/2)^{20} = 1/1048576 < 10^{-6}$$$
This is already very unlikely to happen. And even if it does happen, there are still further circumstances that need to arise for a TLE, depending on the algorithm. For example, if you disregard a starting position as soon as you find it to be inferior to the current best starting position, then we'd expect that most of these bit positions would not actually have to visit all 0s, so there is still a decent chance that we'd fall under the time limit.
We'd need to be incredibly unfortunate to not only have a lot of 1s lined up, but also that actually checking each of these 1s as a starting position ends up having to visit a significant enough portion of the string that the total runtime ends up exceeding the relatively generous 4-second limit. I think the probability of this happening is far lower than the probability that say, your computer dies or your network crashes or some technical glitch leads to a rejected verdict. And so far, I haven't seen anyone post such an unfortunate verdict that arose under such incredibly low odds in an algorithm that would have been expected to pass with such a high probability, and I personally very much doubt this would happen in the next 100 years.
Ouch I have known everything in the tutorial of prob D, except thinking of tests are generated randomly… Actually I noticed that, but I did not care because I did not think it can be a critical information. I guess over half of participants who did not finish prob D have the same reason to me
Sounds so true!
I hope that teaches you the importance of upsolving. 1729E - Guess the Cycle Size from Codeforces Round 820 (Div. 3) was based on the same idea. Had you solved that question, you could have solved this easily. Cheers :)
Not true, I had solved 1729E during contest, but couldn't solve 1743D :( Here's my submission 1729E
Pro tip: Any constraints or information mentioned about the inputs is important. It depends on the author, sometimes they mix it in right along with everything else, maybe in the hopes of some people missing it, sometimes it's mentioned separately in it's own section and written in bold.
It could be equally important in both cases but if it's in bold it's definitely important
+1
Finally BLUE!
Very easy solution for E:
Let dp[h] be a vector with all possible pairs (x1, x2) so that the first gun is ready at the moment x1, and the second — at x2.
Iterate over enemy's hp, for all pairs (x1, x2) for the current hp update the future dp values. There are 3 transitions: shoot the 1st gun, the 2nd gun and both.
The number of pairs (x1, x2) for certain hp may be very large, so notice that we have no interest in (x1, x2) if there exists (y1, y2) and y1 < x1, y2 < x2. Drop bad pairs. Now their number magically becomes very small (no proof) and you get AC in 15 ms.
Can you explain this test case:
input:
3 19
4 29
11 2
correct output:
67
upd: like what sequence leads to the correct output?
(A single shot of the first laser deals 1 damage and a double shot deals 5 damage)
at time t = 19 first 1 laser damage = 1 at time t = 38 fire both laser together damage = 1 + 5 = 6 at time t = 67 fire both laser again damage = 6 + 5 = 11
The situation looks quite similar to 1612F - Armor and Weapons. Maybe a similar analysis could be possible?
Nice approach, thanks! I wonder if there's a proof that the number of elements in these filtered (x_i, y_i) sets is small.
Note: A little mistake in transitions and this solution may be slow again. I see your solution uses e.g. a transition (x1, x2) -> (x1 + t1, max(x1, x2)) which is a bit of a magic... Making it (x1, x2) -> (x1 + t1, x2) will cause TL.
As far as I see the correct one would be (x1, x2) -> (x1 + t1, x2) only if x1 < x2. And this is the only case we should fire this specific transition. Same logic goes for the transition when only second gun fires. Transition with both guns firing is always allowed. So for each (x_i, x_i) state there are at most two transitions.
Well, what if (x1, x2) -> (x1 + t1, max(x1, x2)) and x1 > x2. We get into (x1 + t1, x1) but fire only first gun at x1 and we're ready to fire second gun at x1. That is correct, but the fact that we actually must fire it is dropped. Suppose making that transition again -> (x1 + t1 + t1, x1 + t1). It's ok but we only fired first gun and the second was just skipped, but in general it seems like there may be no way to get into that dp state since the problem statement does not allow to fire one gun when both are charged and the second to just wait.
Can someone share the DP solution for C. Save the Magazines ?
Define $$$dp_{i,j}$$$ as the maximum number of magazines we can save upto index $$$i$$$, where $$$j=0$$$ if the current box does not have a lid on it, and $$$j=1$$$ if the current box has a lid on it.
The transitions can be formulated in the following manner:
- Case 1: $$$s_i=0$$$, which means this box does not have a lid on it. Here, $$$dp_{i,1}=0$$$ because there is no way for us to have a lid on this box, and $$$dp_{i,0}=max(dp_{i-1,0},dp_{i-1,1})$$$, since we can consider either of two cases for the previous box and take the maximum number of magazines saved either way.
- Case 2: $$$s_i=1$$$, which means this box has a lid on it. Here, $$$dp_{i,1}=max(dp_{i-1,0},dp_{i-1,1})+a_i$$$, since we can consider either of two cases for the previous box, and we are also saving the magazines of the current box. But in case this box were not to have a lid on it, then we must pass on the lid of this box to the previous box. Thus, $$$dp_{i,0}=dp_{i-1,0}+a_{i-1}$$$, since we are considering the case when the previous box does not have a lid on it, and adding the number of magazines of the previous box to our current dp state, since the previous box now does have a lid on it.
Our answer is $$$max(dp_{n,0},dp_{n,1})$$$.
C++ code.
I have a non-standard one, but here it is.
Let's say, $$$dp[i][0]$$$ is the maximum sum when the element $$$i$$$ has $$$0$$$ lids, $$$dp[i][1]$$$ the element $$$i$$$ has $$$1$$$ lid, $$$dp[i][2]$$$ the element $$$i$$$ has $$$2$$$ lids, and the extra $$$dp[i][3]$$$ the element $$$i$$$ has $$$1$$$ lid and we can not move this lid (the case when we move the lid to this element from right).
Now, let's count DP from right to left (from $$$n$$$ to $$$1$$$) if $$$s[i] == 1$$$, we can have $$$1$$$ lid or $$$2$$$ lids on this box, so $$$dp[i][1] = max(dp[i + 1][0], dp[i + 1][1], dp[i + 1][2], dp[i + 1][3]) + a[i]$$$ and $$$dp[i][2] = max(dp[i + 1][1] - a[i + 1], dp[i + 1][2]) + a[i]$$$, why we sub $$$a[i + 1]$$$, beucase $$$i+1$$$-th box had $$$1$$$ lid and we removed it, so we have to minus $$$a[i + 1]$$$.
For $$$s[i] == 0$$$ the situation is almost the same, $$$dp[i][0] = max(dp[i + 1][0], dp[i + 1][1], dp[i + 1][2], dp[i + 1][3])$$$ just get the best value. $$$dp[i][3] = max(dp[i + 1][1] - a[i + 1], dp[i + 1][2] + a[i]$$$.
Answer is $$$max(dp[1][i])$$$ for $$$i\in{0,1,2,3}$$$
https://codeforces.net/contest/1743/submission/176734080
Any idea for problem D considering the worst case, not just random test cases?
You could view it as searching for a longest match to a pattern with single-character wildcards, e.g in second example you want to search for "11?1":
For exact matches, there's O(n log n) FFT-based algorithm, basically by expressing matching as a convolution. Here we rather need longest prefix match to it, so you could slap a binary search on top of it to get something like O(n log^2 n).
Suffix trees could be used for wildcard matching but afaik too slow at O(n * <number of wildcards>)
hey is it possible to use LPS array with some kind of custom compare between current element and prev element to search max prefix match
Shorter implementation of D
Can you please explain your logic of D's solution
We can see testcases are generated randomly so probability of k consecutive 1's is 1/2^k, let's say k = 30 in almost impossible scenario. Why is this important? We can brute-force on length of s2 which we will see later.
Now we want two sub-strings let's say s1 and s2, my hypothesis is choosing s1 = s gives best result always! Why? Because we can always choose s2 in such a way that s1|s2 is the answer.
How to choose s2 ?
Let's say s = 1110010, s1 = 1110010, s2 = ?, now we will use sliding window approach.Let's choose s2 of length from n to max(0, n-30)
So we get
Here, we can see we are checking all the best options for s2. Thanks :)
thank you for awesome explanation.
For problem C: because we can move each lid once, just go from right to left, and when we encounter a 0, add (sum − min) and reset.
I did AC C in the contest but then I got TLE test 22 and then I submitted that code and I got AC. My complexity is O(n). my code: https://ideone.com/uYgSAB. Help me!
check how you initialize dp array. you use N value instead of n. and every test case you clear this array. so your time complexity is (t*N)=2*10^9 ops
For me, E's solution code is pretty hard to understand. I guess the code
is for the case "the last shot before destroying the ship wasn't a double one"?
And for the interpretation of
dp[i]
: the smallest time needed to destroy i,
where at the end the shot must be double. So before the last shot anything could happen. Then by analysis we conclude "between two double-shots there is no need to wait except for waiting for the double shot", which intuitively makes sense but I don't have a rigorous proof for this.There is an easier solution for F.For each element x between 0 and 300000,the contribution of the element is only related to the latest position that x is included in the set,no matter how many times x have appeared in previous sets.It can be proved that if the last set which includes x is the ith set,the contribution of x will be pow(3,i-2)*pow(2,n-i+1),except the 1th set which is pow(3,i-1)*pow(2,n-i) (or pow(2,n-1))
So we can simply use a segment tree to implement interval assignment and solve the problem in O(M+nlogM) time.
Solution -> 176975490
Proof:If there exist m ways of the previous k operations that x is inclueded in the current set,if x is included in the next set,there will be pow(3,k) ways that the next operation is and,creating m sets which include x,there will be pow(3,k) ways that the next operation is or,creating pow(3,k) sets which include x,there will also be pow(3,k) ways that the next operation is xor,creating pow(3,k)-m sets which include x,so the total number of sets which include x will be 2*pow(3,k).It is not related to m.
Otherwise,if x is not included in the next set,if the next operation is and,no set will include x,if the next operation is or,m sets will include x,if the next operation is xor,m sets will include x,so the total number of sets which include x will be m*2.
In conclusion,the total contribution of x is only related to the last position x appeared in the set.
Why do we need to add extra "0" to the start for C.Save the magazines problem?
By adding an extra "0" to the start, it becomes possible to use the greedy algorithm from the beginning even when the string starts with a "1".
You could also add all the magazines to ans until you encounter the first "0", but this trick makes the code slightly shorter.
I did not understand the proof of why the two strings which are taken have to be prefixes only S = 1111001101 s1 = S let us take s2 = s[2 , 7 ] then s1 | s2 will be maximized as all the 0's will be 1 now in s1
but the prefixes , wont be able to do so .
I am surely missing something .
The corresponding prefix is s2 = s[0,7] which will also give the same s1|s2 value here.
So , will it create a problem if we take suffixes?
Are these two different things KFPanda
Sorry for directly asking you as I was also thinking the same as the above guy.
Taking prefixes only makes the number of possible (with unique value) s2's less. Hence you need lesser computation to find the optimal s2, hence better.
Another insight on G that I found experimentally: the string $$$f_i$$$ can only appear up to $$$\frac{n}{\lvert f_{i-1} \rvert}$$$ times in a string of length $$$n$$$.
I found this by checking for which $$$l$$$ the prefix of $$$f_i$$$ of length $$$l$$$ is equal to the suffix of the same length. It seems this is the case iff. $$$l \in [\lvert f_{i-2} \rvert, \lvert f_{i-4} \rvert, \lvert f_{i-6} \rvert, \dots]$$$. Thus, whenever $$$f_i$$$ appears in a string, the next appearance must be at least $$$\lvert f_i \rvert - \lvert f_{i-2} \rvert = \lvert f_{i-1} \rvert$$$ characters further to the right. It might be possible to formally prove this, but my brain is way too fried right now to attempt it.
With this, we can almost implement the basic dp solution mentioned in the editorial. We can use string hashing to quickly check which $$$f_i$$$ end at an index $$$j$$$. To subtract the value of $$$dp_{j - \lvert f_i \rvert}$$$, we can use a different strategy for long and short $$$f_i$$$. If we consider $$$f_i$$$ up to length $$$N$$$ short, we keep a history of $$$N$$$ dp values to find $$$dp_{j - \lvert f_i \rvert}$$$ for short $$$f_i$$$. For longer ones, we know that the total number of their appearances is quite low, and thus also the number of indices $$$j - \lvert f_i \rvert$$$. We can find these indices using preprocessing and then keep their dp value around, for example in a map. See 177351187 for my solution which uses $$$N = 100$$$.
I also used that idea in my solution, but implemented it in an even more straightforward way. Maintain a queue of updates for each fib string. When there is an occurrence of $$$f_k$$$ from some position $$$i$$$ forwards, push an update $$$i + F_k$$$ to tell it to subtract $$$dp_i$$$ from there. There can be at most $$$F_i$$$ updates in the $$$i$$$-th queue, and that idea tells us that there can't be more than $$$\frac{n}{F_{i-1}}$$$ updates there as well. So all queues can't grow too large.
Also, don't use hash, just compare naively. It'll break quite fast if it's not an occurrence (the same idea helps). The solution itself.
Nice contest
bro i can't reply to you, i'm a new user
awoo and BledDest can any of you please explain this part of the editorial of the 1743E - FTL:
Can not we calculate the time with like PUSH DP like :
DP[i+damage of 1st laser] = min(DP[i] + reload time of the 1st laser, DP[i+damage of 1st laser]
and similarly other two DP states
What's wrong in this thought process AND IF YOU CAN PLEASE REPLY IT WILL BE VERY HELPFUL sir
I have a slightly different (more like, improved) solution for G and it is currently by far the fastest one (77ms), so I want to share my approach. The idea is to compute, for each prefix of the entire concatenated string, the largest Fibonacci string that is its suffix. This uniquely determines all Fibonacci strings that are its suffixes: If $$$f_i$$$ is a suffix, then $$$f_{i-2}$$$ is also a suffix, and $$$f_{i-4}$$$ by induction, and so on. Also, it means that none of $$$f_{i-1}, f_{i-3} \ldots$$$ can be suffixes because they end in a different character ($$$f_i$$$ ends with $$$i \mod 2$$$). All this data can be stored in 3MB. The idea to compute the DP in under 1MB is to create a map $$$Mp$$$ which will keep the DP values for all indices $$$j$$$ such that there is a large Fibonacci string (say, over 100000 characters) starting at $$$j$$$. There cannot be many such indices. We also keep the last $$$2^{17}$$$ DP values in a circular buffer. This information is now enough to compute all DP values sequentially. Time complexity is exactly $$$O(N \log N)$$$ unconditionally. The only thing I didn't prove is that $$$Mp$$$ will have few elements, but it makes sense, as Fibonacci strings can't overlap arbitrarily.
my submission