Idea: BledDest
Tutorial
Solution (awoo)
fun main() = repeat(readLine()!!.toInt()) {
    val (b, c, h) = readLine()!!.split(' ').map { it.toInt() }
    println(minOf(b - 1, c + h) * 2 + 1)
}
Idea: BledDest
Tutorial
Solution (Neon)
#include <bits/stdc++.h>
using namespace std;

int main() {
    ios::sync_with_stdio(false); cin.tie(0);
    int t;
    cin >> t;
    while (t--) {
        int n, k;
        cin >> n >> k;
        vector<int> a(n);
        for (auto &x : a) {
            cin >> x;
            // only the remainder modulo k matters; a multiple of k behaves like a full k
            x %= k;
            if (!x) x = k;
        }
        // monsters die in decreasing order of that remainder (stable sort keeps smaller indices first on ties)
        vector<int> ord(n);
        iota(ord.begin(), ord.end(), 0);
        stable_sort(ord.begin(), ord.end(), [&](int i, int j) {
            return a[i] > a[j];
        });
        for (auto &x : ord) cout << x + 1 << ' ';
        cout << endl;
    }
}
Idea: BledDest
Tutorial
Solution (vovuh)
#include <bits/stdc++.h>
using namespace std;

void solve() {
    int n, m;
    string s;
    cin >> n >> m >> s;
    // lf[i] = position of the last '0' at or before i (-1 if none)
    vector<int> lf(n), rg(n);
    lf[0] = -1;
    for (int i = 0; i < n; ++i) {
        if (i > 0) lf[i] = lf[i - 1];
        if (s[i] == '0') lf[i] = i;
    }
    // rg[i] = position of the first '1' at or after i (n if none)
    rg[n - 1] = n;
    for (int i = n - 1; i >= 0; --i) {
        if (i + 1 < n) rg[i] = rg[i + 1];
        if (s[i] == '1') rg[i] = i;
    }
    set<pair<int, int>> st;
    for (int i = 0; i < m; ++i) {
        int l, r;
        cin >> l >> r;
        // the sorted copy is determined by the first '1' and the last '0' inside the segment
        int ll = rg[l - 1], rr = lf[r - 1];
        if (ll > rr) {
            // sorting changes nothing: this copy equals the original string
            st.insert({-1, -1});
        } else {
            st.insert({ll, rr});
        }
    }
    cout << st.size() << endl;
}

int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
//  freopen("output.txt", "w", stdout);
#endif
    int t;
    cin >> t;
    while (t--) solve();
}
Idea: BledDest
Tutorial
Solution (BledDest)
n = int(input())
a = list(map(int, input().split()))
ans = 0
l = 0
while l < n:
    # extend the block starting at l while it stays "good": no zero strictly
    # between l and the current r, and either the block contains a 2 or its
    # two current endpoints are not both 0
    r = l + 1
    hasTwo = (a[l] == 2)
    hasMiddleZero = False
    while r < n:
        if r - 1 > l and a[r - 1] == 0:
            hasMiddleZero = True
        if a[r] == 2:
            hasTwo = True
        good = (not hasMiddleZero) and (hasTwo or a[l] != 0 or a[r] != 0)
        if not good:
            break
        r += 1
    l = r
    ans += 1
print(ans)
1849E - Max to the Right of Min
Idea: awoo
Tutorial
Solution (awoo)
#include <bits/stdc++.h>
using namespace std;

bool comp(const pair<int, int> &a, const pair<int, int> &b){
    return a.second < b.second;
}

int main(){
    int n;
    scanf("%d", &n);
    vector<pair<int, int>> stmn, stmx;
    stmn.push_back({-1, -1});
    stmx.push_back({n, -1});
    long long ans = 0;
    int len = 0;
    set<pair<int, int>> cur;
    cur.insert({-1, 0});
    cur.insert({-1, 1});
    for (int i = 0; i < n; ++i){
        int x;
        scanf("%d", &x);
        --x;
        while (stmn.back().first > x){
            auto it = cur.lower_bound({stmn.back().second, 0});
            auto me = it;
            auto prv = it; --prv;
            ++it;
            len -= me->first - prv->first;
            if (it != cur.end() && it->second == 0)
                len += it->first - prv->first;
            cur.erase(me);
            stmn.pop_back();
        }
        len += i - cur.rbegin()->first;
        cur.insert({i, 0});
        stmn.push_back({x, i});
        while (stmx.back().first < x){
            auto it = cur.lower_bound({stmx.back().second, 1});
            auto me = it;
            auto prv = it; --prv;
            ++it;
            if (it != cur.end() && it->second == 0)
                len += me->first - prv->first;
            cur.erase(me);
            stmx.pop_back();
        }
        cur.insert({i, 1});
        stmx.push_back({x, i});
        ans += len;
    }
    printf("%lld\n", ans - n);
}
Idea: BledDest
Tutorial
Solution (BledDest)
#include <bits/stdc++.h>
using namespace std;
#define pb push_back
#define mp make_pair
#define forn(i, n) for (int i = 0; i < (int)(n); ++i)
typedef long long LL;
typedef pair<int, int> PII;
const int N = 200000;
const int NODES = 32 * N;
const int INF = int(2e9);
int n;
int a[N];
int nx[NODES][2], cnt[NODES], fn[NODES];
int nodeCount = 1;
void addInt(int x, int pos) {
int ind = 0;
for (int i = 29; i >= 0; --i) {
int bit = (x >> i) & 1;
if (nx[ind][bit] == 0) {
nx[ind][bit] = nodeCount++;
}
ind = nx[ind][bit];
++cnt[ind];
}
fn[ind] = pos;
}
void addInt(int x) {
int ind = 0;
for (int i = 29; i >= 0; --i) {
int bit = (x >> i) & 1;
ind = nx[ind][bit];
++cnt[ind];
}
}
void removeInt(int x) {
int ind = 0;
for (int i = 29; i >= 0; --i) {
int bit = (x >> i) & 1;
ind = nx[ind][bit];
--cnt[ind];
}
}
PII findXor(int x) {
int ind = 0, res = 0;
for (int i = 29; i >= 0; --i) {
int bit = (x >> i) & 1;
if (cnt[nx[ind][bit]]) {
ind = nx[ind][bit];
} else {
ind = nx[ind][bit ^ 1];
res |= 1 << i;
}
}
return mp(res, fn[ind]);
}
int par[200000], ra[200000];
void dsuInit() {
forn(i, n) par[i] = i, ra[i] = 1;
}
int dsuParent(int v) {
if (v == par[v]) return v;
return par[v] = dsuParent(par[v]);
}
int dsuMerge(int u, int v) {
u = dsuParent(u);
v = dsuParent(v);
if (u == v) return 0;
if (ra[u] < ra[v]) swap(u, v);
par[v] = u;
ra[u] += ra[v];
return 1;
}
vector<int> v[200000];
vector<pair<int, PII> > toMerge;
vector<int> g[200000];
int color[200000];
void coloring(int x, int c){
if(color[x] != -1) return;
color[x] = c;
for(auto y : g[x]) coloring(y, c ^ 1);
}
int main() {
scanf("%d", &n);
forn(i, n) scanf("%d", a + i);
forn(i, n) addInt(a[i], i);
dsuInit();
for (int merges = 0; merges < n - 1; ) {
forn(i, n) v[i].clear();
forn(i, n) v[dsuParent(i)].pb(i);
toMerge.clear();
forn(i, n) if (!v[i].empty()) {
for (int x : v[i]) {
removeInt(a[x]);
}
pair<pair<int, int>, int> res = mp(mp(INF, INF), INF);
for (int x : v[i]) {
res = min(res, mp(findXor(a[x]), x));
}
toMerge.pb(mp(res.first.first, mp(res.second, res.first.second)));
for (int x : v[i]) {
addInt(a[x]);
}
}
for (auto p : toMerge) {
if (dsuMerge(p.second.first, p.second.second)) {
++merges;
g[p.second.first].pb(p.second.second);
g[p.second.second].pb(p.second.first);
}
}
}
forn(i, n) color[i] = -1;
coloring(0, 1);
forn(i, n) printf("%d", color[i]);
puts("");
return 0;
}
Is there any way to solve C by doing the following: 1) store all prefixes of the string, 2) store all suffixes of the string, 3) count the 1s and 0s and store them in a prefix array?
Now when we get a query l, r, we create a temp string such that temp = prefix up to l-1 + a string of zeroes whose length is the number of zeroes in [l, r] + the same with ones + suffix [r+1].
And put the temp string into a set.
Will this work?
The time complexity of constructing such a string is $$$O(n)$$$ per query, so I think this will get TLE.
However, I managed to solve this problem using string hashing, which makes it possible to calculate the hash of an operated temp string in $$$O(1)$$$.
To avoid hash collision, use a double hash.
Here's my submission 216020500. (Note that the time complexity is $$$O(m+n\log n)$$$ ,because we need a set to count all different hashes.)
Suppose I solve it using construction and obtain the constructed string temp; how can I use your hashing on this temp?
Your approach of using std::string and concatenating the four precomputed strings PREFIX+ZEROS+ONES+SUFFIX is still $$$O(n)$$$ (concatenation takes $$$O(\text{len})$$$ time). To solve the problem more efficiently, you need to implement a hash function manually.
Let's say the hash function $$$f(s)=\displaystyle\sum_{i=1}^{\ell}s[i]\times b^{\ell-i} \pmod M$$$, where $$$b$$$ and $$$M$$$ are constants.
If you already have the hash of string $$$s_1$$$ and $$$s_2$$$, then $$$f(s_1+s_2)=f(s_1)\cdot b^{\operatorname{len}(s_2)}+f(s_2)\pmod M$$$, which means we can concat two strings in $$$O(1)$$$ time (precalculate all the powers of $$$b$$$).
So, 1) calculate the hash of each prefix 2) calculate the hash of each suffix 3) calculate the hash of ones.
Then you can calculate the hash of an operated string in $$$O(1)$$$ by concatenating the four parts using the method above.
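For concreteness, here is a minimal single-modulus sketch of this approach (the names pref, suf, pw, pwsum and the base/modulus are my own choices; as noted above, a real submission should use a double hash to avoid collisions):
~~~~~
#include <bits/stdc++.h>
using namespace std;

const long long MOD = 1000000007LL, B = 131;

int main() {
    int n, m;
    string s;
    cin >> n >> m >> s;

    // pw[k] = B^k, pwsum[k] = B^0 + ... + B^(k-1) (hash of a block of k equal characters of value 1)
    vector<long long> pw(n + 1, 1), pwsum(n + 1, 0);
    for (int i = 1; i <= n; ++i) {
        pw[i] = pw[i - 1] * B % MOD;
        pwsum[i] = (pwsum[i - 1] + pw[i - 1]) % MOD;
    }

    // pref[i] = hash of s[0..i-1], cnt0[i] = number of '0' among s[0..i-1]
    vector<long long> pref(n + 1, 0), suf(n + 2, 0);
    vector<int> cnt0(n + 1, 0);
    for (int i = 0; i < n; ++i) {
        pref[i + 1] = (pref[i] * B + (s[i] - '0' + 1)) % MOD; // map '0' -> 1, '1' -> 2
        cnt0[i + 1] = cnt0[i] + (s[i] == '0');
    }
    // suf[i] = hash of s[i-1..n-1] for a 1-indexed start i, suf[n + 1] = hash of the empty string
    for (int i = n; i >= 1; --i)
        suf[i] = (suf[i + 1] + (s[i - 1] - '0' + 1) * pw[n - i]) % MOD;

    // f(s1 + s2) = f(s1) * B^len(s2) + f(s2)
    auto concat = [&](long long h1, long long h2, int len2) {
        return (h1 * pw[len2] + h2) % MOD;
    };

    set<long long> distinct;
    for (int q = 0; q < m; ++q) {
        int l, r;
        cin >> l >> r;
        int zeros = cnt0[r] - cnt0[l - 1], ones = (r - l + 1) - zeros;
        long long h = pref[l - 1];                        // untouched prefix s[1..l-1]
        h = concat(h, 1 * pwsum[zeros] % MOD, zeros);     // block of '0' (character value 1)
        h = concat(h, 2 * pwsum[ones] % MOD, ones);       // block of '1' (character value 2)
        h = concat(h, suf[r + 1], n - r);                 // untouched suffix s[r+1..n]
        distinct.insert(h);
    }
    cout << distinct.size() << '\n';
}
~~~~~
Hashing a block of equal characters via pwsum also avoids any modular division for the geometric series.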
I implemented the same thing but got memory limit exceeded on test 4.
You didn't precompute the prefixes.
You used the substr() function for getting the prefix and suffix; its time complexity is O(size of the substring), which in the worst case is N, so somewhere it will lead to TLE.
I was talking about how we can store the prefixes beforehand. Like, for example:
map<int, string> prefix_at_index;
string pf = "";
for i until s.length():
    pf.push_back(s[i]);
    prefix_at_index[i] = pf;
Something like this. At least it gives log(n) complexity to get a prefix and suffix.
Storing all prefixes would also lead to MLE:
What is the total length of all prefixes of a string length $$$n$$$? It is $$$1 + 2 + 3 + 4 + 5 + \dots + n$$$, which is equal to $$$\displaystyle\frac{n(n + 1)}{2}$$$.
If $$$n = 200\,000$$$, $$$\displaystyle\frac{n(n + 1)}{2} = \frac{200\,000 \cdot 200\,001}{2} = 20\,000\,100\,000$$$.
The total length of all prefixes would be $$$20\,000\,100\,000$$$ and since each character is $$$1$$$ byte, your code would need to store $$$20\,000\,100\,000$$$ bytes in memory. $$$20\,000\,100\,000$$$ bytes is about $$$20\,000$$$ megabytes, which is more than the memory limit of $$$256$$$ megabytes.
Hello everybody!
The competition was very interesting, but despite this, I will still get negative points.
Thanks for the competition: adedalic, awoo, BledDest, Neon and MikeMirzayanov
Any problems similar to C?
Is there any way to solve C by constructing a new string for each query? I am getting memory limit exceeded.
Highly doubt it. Constructing a new string is O(n) (not accounting for any sorting you might want to do). So if you do that for each query, that's too slow.
If you construct a new string each query (and store all of them), your code will store $$$200\,000 \cdot 200\,000 = 40\,000\,000\,000 = 4 \cdot 10^{10}$$$ bytes of data (each character is $$$1$$$ byte, each string has $$$200\,000$$$ characters, there are $$$200\,000$$$ strings).
$$$4 \cdot 10^{10}$$$ bytes is about $$$4 \cdot 10^{4} = 40\,000$$$ megabytes which is quite a bit larger than the memory limit of $$$256$$$ megabytes.
Thanks brother for letting me know about this.
Instead, you can construct a new hash for every query. Let (L, R) be my query; then Hash[1 to (L - 1)] and Hash[(R + 1) to N] remain unchanged. So we only need to reconstruct Hash[L to R], which is pretty straightforward: Hash[L to R] = PrimePowerSum[L to (L + zeroes in this range - 1)] + 2 * PrimePowerSum[(L + zeroes in this range) to R].
You can use string hashing to do what you want to do.
What is that simpler solution for problem F that most of the participants wrote?
I'll try to explain my simpler solution.
The basic idea is that, if I have at least 3 numbers with a common prefix in the binary representation, then no matter how I partition the sets, at least 2 of those will be in the same set, and all bits of the common prefix will be 0 upon xorring those numbers. So the idea is to find the longest prefix such that there are at least 3 elements with that prefix.
n = 2 is a corner case, which can be solved independently. Now assume n >= 3.
It is guaranteed that there exists some B such that right-shifting all elements by B will result in 3 or more equal elements, let's find the smallest such B.
Since I have picked the smallest such B, one can observe that there won't be more than 4 equal elements upon right shifting every element by B. (If there were 5 or more elements, then B-1 also would've worked.)
After making some cases manually, [VERY IMPORTANT:] one can observe that the minimum xor will be obtained by xorring the elements that become equal after right bitshifting by B. (because any other 2 numbers will have the xor >= $$$2^B$$$)
So, the solution is simple:
find B,
then make groups, such that all elements of the group become equal upon right shifting by B (size of all such groups <= 4)
within each group, try every way of partitioning them into 2 sets. (can actually be narrowed down to much fewer cases, but this much is enough for a complete solution)
Here is my solution with some slightly ugly casework at the end, but one can get the general idea from it: https://codeforces.net/contest/1849/submission/216361206
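A minimal sketch of steps 1 and 2 only (finding the smallest B and forming the groups), under the assumptions stated in the comment above; the per-group casework and the final set assignment are left out, and all variable names here are my own:
~~~~~
#include <bits/stdc++.h>
using namespace std;

int main() {
    int n;
    cin >> n;
    vector<long long> a(n);
    for (auto &x : a) cin >> x;
    // n == 2 is the corner case mentioned above and is assumed to be handled separately

    for (int B = 0; B <= 60; ++B) {
        // group indices by the value of a[i] >> B
        map<long long, vector<int>> groups;
        for (int i = 0; i < n; ++i) groups[a[i] >> B].push_back(i);

        bool ok = false;
        for (auto &[pref, ids] : groups) ok |= (ids.size() >= 3);
        if (!ok) continue; // no group of size >= 3 yet, try a larger shift

        // B is now the smallest shift producing a group of size >= 3; as argued above,
        // every group then has size at most 4 and the minimum XOR is realized inside a group
        for (auto &[pref, ids] : groups) {
            if (ids.size() < 3) continue;
            cout << "prefix " << pref << ":";
            for (int i : ids) cout << ' ' << i;
            cout << '\n';
            // here one would brute-force the 2^|ids| ways of splitting this group
        }
        break;
    }
}
~~~~~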
Problem E can also be solved using the trick from 1156E - Special Segments of Permutation. Firstly, precalculate the "borders" in which $$$p_i$$$ is the minimum or the maximum element using monotonic stacks. Let's fix the maximum element of the segment. As in the problem mentioned above go to the closest end of the maximum border. If it is to the left of $$$p_i$$$ just calculate the number of good segments ($$$minpos(l, r) < maxpos(l, r)$$$) directly. Otherwise calculate the number of bad segments ($$$minpos(l, r) \geq maxpos(l, r)$$$) and subtract it from the total number of segments. You can see the details in my submission (with comments!): 216141125
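For reference, a small sketch of just the first step mentioned above: computing with monotonic stacks the range in which each element of the permutation is the maximum (the arrays for the minimum are symmetric, and the names lfMax/rgMax are mine; the counting itself is in the linked submission):
~~~~~
#include <bits/stdc++.h>
using namespace std;

int main() {
    int n;
    cin >> n;
    vector<int> p(n);
    for (auto &x : p) cin >> x;

    // lfMax[i] = nearest j < i with p[j] > p[i] (or -1), rgMax[i] = nearest j > i with p[j] > p[i] (or n);
    // p[i] is the maximum of every segment [l, r] with lfMax[i] < l <= i <= r < rgMax[i]
    vector<int> lfMax(n, -1), rgMax(n, n);
    stack<int> st;
    for (int i = 0; i < n; ++i) {
        while (!st.empty() && p[st.top()] < p[i]) st.pop();
        lfMax[i] = st.empty() ? -1 : st.top();
        st.push(i);
    }
    while (!st.empty()) st.pop();
    for (int i = n - 1; i >= 0; --i) {
        while (!st.empty() && p[st.top()] < p[i]) st.pop();
        rgMax[i] = st.empty() ? n : st.top();
        st.push(i);
    }
    for (int i = 0; i < n; ++i)
        cout << lfMax[i] << ' ' << rgMax[i] << '\n';
}
~~~~~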
Can you elaborate on the complexity of your algorithm? It doesn't seem obvious why it isn't quadratic in the worst-case scenario
UPD: I followed the link to the similar problem that you provided; it is explained there very well. It is indeed a very educational problem, thanks!
Why, in problem E, in the author's solution, in the line len += it->first - prv->first; (in the first while loop) do we subtract from it->first and not from me->first? As I understand, in the case it->second == 0, we should first subtract from len the value it->first - me->first and then add the value it->first - prv->first.
More surprisingly, both versions get accepted: 216151073 and 216150501.
The line len += it->first - prv->first is completely redundant in the min stack: since we are continuously removing minimums, it is impossible for it->second to be 0. Hence, you can even delete that part completely and still get AC.
Oh, that's true. I initially had the solution for min > max, so I hastily moved the lines around to work for the actual problem. That's why it's like that.
UPD: Apparently, it was just as redundant in the first version.
Can someone explain C in another way. I don’t really understand the editorial’s solution.
When we try to sort [l, r], if str[l] to str[mid] are all '0' and str[mid+1] to str[r] are all '1', then the string wouldn't change at all.
Let pos_0[i] denote the position of the first '0' at position i or to its left, and pos_1[i] denote the position of the first '1' at position i or to its right.
So for [l, r], if pos_0[r] < pos_1[l], then the string wouldn't change at all; otherwise, the string will change.
But notice that if there are several '0' right before position l and several '1' right after position r, that is, str[ll] to str[l-1] are all '0' and str[r+1] to str[rr] are all '1', then sorting [l, r] and sorting [ll, rr] actually produce the same string.
Indeed, pos_0[r] = pos_0[rr] (because str[r+1] to str[rr] are all '1') and pos_1[l] = pos_1[ll]. So if sorting [l1, r1] and sorting [l2, r2] give different strings, then { pos_1[l1], pos_0[r1] } differs from { pos_1[l2], pos_0[r2] }.
My English is not very good. Please forgive my grammar errors and poor typesetting.
Thanks for the explanation. Your explanation along with some of the editorial helped me understand :)
Why not add the editorial to "Contest Materials" ?
Upd: Added.
I am having a hard time understanding problem C. While I can see that my approach isn't optimized at all, I would have expected either a TLE or an MLE, but instead I got WA on test 2. My approach: for each of the m queries, sort the copy on [l, r], store it in an unordered set, and finally output the size of the set. I understand that if there is no change in the string after applying the op, adding the ti string (where ti == s) to the set means I have added the original string only, and that is counted once as the set stores unique values. What am I understanding wrong here? My submission that got WA: 215938621
Deleted comment
Brother, I am very stupid. I still don't understand what this means. Is there some part in that statement that needs deciphering? In my code each copy (ti) of the string s is affected by only one op, that is sort(ti.begin()+l-1, ti.begin()+r-1). Is adding the string's copy to the set counted as another operation? If yes, then how does that matter? Thanks
Ignore my previous comment. Your code is giving an error because you are sorting the string in the wrong way. Instead of doing sort(ti.begin()+l-1, ti.begin()+r-1), you should do sort(ti.begin()+l-1, ti.begin()+r). You can understand it yourself if you think for a few seconds.
Suppose you want to sort [l, r] = [1, 2] for some string. sort(ti.begin()+l-1, ti.begin()+r-1) will reduce to sort(ti.begin(), ti.begin()+1), which is wrong.
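A tiny self-contained illustration of that half-open convention (the string and query here are made-up values for demonstration):
~~~~~
#include <bits/stdc++.h>
using namespace std;

int main() {
    string t = "10110";
    int l = 1, r = 2;                        // 1-indexed query
    // std::sort takes [first, last), so passing begin()+l-1 and begin()+r sorts positions l..r
    sort(t.begin() + l - 1, t.begin() + r);
    cout << t << '\n';                       // prints "01110"
}
~~~~~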
I tried double hashing for problem C; I'm still sometimes getting 1 more than the correct answer.
216112844
Take a look at Ticket 17005 from CF Stress for a counter example.
For what it's worth, for problem E there is another (overly complicated, but perhaps more straightforward) solution.
Let $$$[l_i, r_i]$$$ be the range where $$$p_i$$$ is the maximum element, $$$a_i$$$ be the position of the closest element to the left of $$$p_i$$$ which is less than $$$p_i$$$, or $$$a_i = 0$$$ if it does not exist. Now, let's fix $$$i$$$ and look at the subarrays where $$$p_i$$$ is the maximum element, i.e. subarrays $$$p_l, \dots, p_r$$$ such that $$$l_i \le l \le i$$$ and $$$i \le r \le r_i$$$.
Here comes the main observation: the subarray $$$p_l, \dots, p_r$$$ is good ($$$minpos_{l,r} < i$$$) iff $$$\min(a_i, \dots, a_r) \ge l$$$. The proof should be trivial so I will omit it. Therefore, for fixed $$$r$$$ we have $$$\max(0, \min(a_i, \dots, a_r) - l_i + 1)$$$ good subarrays.
In order to get rid of the $$$\max(0, \dots)$$$ part let's find the rightmost index $$$r_i'$$$, such that $$$r_i' \le r_i$$$ and $$$\min(a_i, \dots, a_{r_i'}) \ge l_i$$$. Since the array $$$a$$$ can be calculated beforehand, this part can be done in $$$O(\log n)$$$ using a sparse table or some other data structure. Then, the number of good subarrays containing $$$p_i$$$ is $$$\sum_{r=i}^{r_i'} \min(a_i, \dots, a_r) - (r_i' - i + 1) \cdot (l_i - 1)$$$.
Subtracting the $$$(r_i' - i + 1) \cdot (l_i - 1)$$$ part is easy, let's focus on the sum. We have the following problem: given an array $$$a_1, \dots, a_n$$$ and $$$n$$$ queries of the form $$$(l, r)$$$, for each query find $$$\sum_{i=l}^r \min(a_l, \dots, a_i)$$$. There must be an easier way to solve it, but here is what I came up with. Let's answer these queries offline by iterating $$$l$$$ from right to left.
We need to support 2 kinds of operations on the array $$$b$$$ which is initially empty: 1) append an element to the left of $$$b$$$, 2) query $$$\sum_{i=1}^{r} \min(b_1, \dots, b_i)$$$. We can use a segment tree for that. Operation 1 can be done using range assignment: when we are appending $$$a_i$$$ all we need is to assign $$$a_i$$$ on the range $$$[i, q_i)$$$, where $$$q_i$$$ is the closest position of an element less than $$$a_i$$$ to the right of $$$a_i$$$ (or $$$n + 1$$$, if it does not exist). And operation 2 is simply a range sum query.
That's it, not the easiest implementation, but it felt very natural to come up with. The final complexity is $$$O(n \log n)$$$ and my submission is 215978080.
Can someone please explain, for C, why we can't just sort on each iteration? Won't that be n log n?
m for all iterations and then long for storing:
This gives WA on 2nd tc
C++
~~~~~
int main() { long t,m,n; string s; cin>>t;
}
~~~~~~
Python
I understood why I got TLE in Python, but why did I get a WA on the C++ solution?
TLE reason: sorting is n log n per query, while inserting into the set is about log n, so the extra factor of n is what causes the TLE (as we know n > log n).
I had the exact same doubt. This was my original question. Did you understand how the solution works?
sort works on a half-open range [begin; end) in the C++ STL, so if (l<=r) sort(temp.begin()+l,temp.begin()+r); should be if (l<=r) sort(temp.begin()+l,temp.begin()+r + 1); (or just don't decrement r beforehand).
Problem E was a very interesting problem. My initial idea was to use a lazy segtree for an N log N solution, but it turns out that the O(N) solution is much cleaner and simpler.
https://codeforces.net/contest/1849/submission/216349588
I've tried C with string hashing. My idea was basically: for every query, first subtract the hash of the part (l, r) from the hash of the full string, and then add the "sorted" hash of (l, r) by counting the number of 1s and 0s in (l, r) with a prefix sum. I can do this in O(1) since I precompute the powers of "p" from 1 to n, the prefix sums, and the prefix hash of the string.
Although I think my logic is sound, my solution fails on test case 4: link. I am not able to spot my mistake; any help would be appreciated.
My implementation is based on this source: https://cp-algorithms.com/string/string-hashing.html
I used the same approach and my solution gives WA on test 2.
Why do I think D is easier than C?
Can anyone help me understand the editorial of problem E? I tried, but I failed to understand it.
Has anybody done D with DP? If yes, could you please explain and share the approach?
I upsolved it with DP. Note that coloring a $$$0$$$ doesn't affect any other elements. Also, coloring a $$$1$$$ or $$$2$$$ may affect other $$$0$$$'s and other nonzero elements beside it. So, a possible strategy is to color some nonzero elements first, then color the $$$0$$$'s that are unaffected in the end.
The idea is to consider contiguous segments of nonzero elements such that to their right is either a boundary or a $$$0$$$ and to their left is either a boundary or a $$$0$$$. If there is a two in a segment, then we may color the two once and use the $$$2$$$nd operation on it twice, which will affect nonzero elements in both directions until it hits a boundary or a $$$0$$$. Otherwise, we can color from either end of the segment: if we color the right end, choose the left elements and we may affect a zero to its left; if we color the left end, choose the right elements and we may affect a zero to its right. So, there are $$$2$$$ or $$$3$$$ optimal ways to color a segment.
Suppose these segments are stored as a tuple $$$(l_i, r_i, t_i)$$$ where $$$t_i = 0$$$ if the segment is only composed of $$$1$$$'s and $$$t_i = 1$$$ if there exists a $$$2$$$ in the segment.
Let $$$dp_{i,j}$$$ be defined as the maximum number of zeroes you can affect processing the first $$$i$$$ segments such that the $$$i$$$th segment is colored the $$$j$$$th way. If $$$j=0$$$, then we color the right end; if $$$j=1$$$, then we color the left end; and if $$$j=2$$$, then we color the two if it is possible.
Transitions are pretty straightforward but messy. You have to consider if a segment contains a $$$2$$$, if it is right along a boundary, and what if consecutive segments are only one $$$0$$$ apart. Here's my submission: 216927717.
Let $$$m$$$ be the number of segments and $$$c$$$ be the number of zeroes. The answer is $$$m + c - \max{(dp_{m-1,0}, dp_{m-1,1}, dp_{m-1,2})}$$$.
But yeah, editorial solution is so much simpler.
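A small sketch of just the segment-extraction step described above (building the tuples $$$(l_i, r_i, t_i)$$$); the DP transitions themselves are in the linked submission, and the helper name and example array are my own:
~~~~~
#include <bits/stdc++.h>
using namespace std;

// returns tuples (l, r, t): maximal runs of nonzero values, t = 1 iff the run contains a 2
vector<array<int,3>> extractSegments(const vector<int>& a) {
    vector<array<int,3>> segs;
    int n = a.size();
    for (int i = 0; i < n; ) {
        if (a[i] == 0) { ++i; continue; }
        int j = i, t = 0;
        while (j < n && a[j] != 0) {
            if (a[j] == 2) t = 1;
            ++j;
        }
        segs.push_back({i, j - 1, t});
        i = j;
    }
    return segs;
}

int main() {
    vector<int> a = {0, 1, 2, 1, 0, 1, 1, 0, 0, 2};
    for (auto [l, r, t] : extractSegments(a))
        cout << l << ' ' << r << ' ' << t << '\n'; // expected: "1 3 1", "5 6 0", "9 9 1"
}
~~~~~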
There is another solution to F, a bit harder to code but easy to understand. Let's binary search on the answer; now we need to check whether the answer is <= X. Then go from left to right, maintaining a binary trie. Then, on each level of the trie, check the following: if the i-th bit is set in X, then all of the numbers with which the XOR would have that bit have to be in a different component. Otherwise we can ignore the bigger component. Since we can unite components in $$$O(sz)$$$, and the sum of all sizes is $$$O(NlogMAX)$$$, one check works in $$$O(NlogMAX)$$$. Since the DFS constant is really high, use DSU.
Working code: https://codeforces.net/contest/1849/submission/216600784
Can someone help me find the issue in my code? 216602874 For each l[i], r[i], I subtract the hash of the section from the hash of the string and add the hash of the sorted section, which is calculated using the geometric series sum formula. I add the resulting hash to the set and output the size of the set as the answer.
As I understood, the issue with the code was the GSS function and the bigInt (__int128) type. In the GSS function, when overflow occurs, division by r-1 gives a wrong result. And the problem with the __int128 type is that the hash stored in it is calculated incorrectly if GSS returns a negative number. So instead I used the unsigned long long type.
For problem E, I have a cool idea, but unfortunately my poor ability can't make it happen. Is there anyone willing to take a look and tell me if this is impossible?
My idea is: for each index $$$i$$$, we can use a monotonic stack to compute $$$Lmin_i, Rmin_i$$$, meaning that when the interval's left endpoint is in $$$[Lmin_i, i]$$$ and its right endpoint is in $$$[i, Rmin_i]$$$, the minimum value of the interval is $$$a[i]$$$. The maximum value is similar and can be described by $$$Lmax, Rmax$$$.
Then we try to construct a rectangle $$$A$$$ on a two-dimensional plane. Its lower left coordinate is $$$(Lmin, i)$$$, and its upper right coordinate is $$$(i, Rmin)$$$.
That is to say, this rectangle spans $$$[Lmin_i, i]$$$ on the x-axis and $$$[i, Rmin_i]$$$ on the y-axis. The area of this rectangle is the number of intervals whose minimum value is $$$a[i]$$$.
If we add another rectangle $$$B$$$ in the same way using $$$Lmax_j, Rmax_j$$$, then the area of the intersection of rectangles $$$A$$$ and $$$B$$$ is the number of intervals whose minimum value is $$$a[i]$$$ and whose maximum value is $$$a[j]$$$.
If we additionally require $$$i < j$$$, then the sum over all such intersecting rectangle pairs is the answer to this problem.
So what I have in mind is: index $$$i$$$ goes from left to right; first query the total area of all $$$Lmin, Rmin$$$ rectangles within the range of the $$$Lmax_i, Rmax_i$$$ rectangle and add it to the answer, and then add the $$$Lmin_i, Rmin_i$$$ rectangle.
The key to the problem is this task of dynamically adding rectangles and querying the total covered area.
My idea may be naive, please don't laugh at me hard XD
Can I use string hashing to solve problem C?
Yes, I think it may work. If you use string hashing you can simply calculate the hash of each resulting string with some math. I would not recommend it, though. It's better to solve easier problems the intended way instead of forcing your way through with advanced data structures.
C and D should be swapped in my opinion
https://codeforces.net/contest/1849/submission/239744150 For problem E, does anyone know what this verdict means? I believe my code is correct and it passed 35 test cases.
Same problem bro. There is an error in generating test 36.
It looks like the issue was caused by a test generator submitted during the hacking phase (it was generating the test too slowly). I've rewritten that generator in C++ and rejudged all submissions receiving that verdict, so the problem should be fixed.
UPD: somehow that issue still persists, I'm not sure what is happening but hopefully I can resolve it
UPD2: okay, now it definitely works.
In problem D, why can't I first BFS from all the 2s and then from all the 1s, and count the remaining unvisited indices? Please give a test case where this fails.
I think you can: https://codeforces.net/contest/1849/submission/280876264
Problem B monsters solution video editorial link: https://youtu.be/C1v2IkfKaEM?si=vfMBsIDWsvGQCO51