Thank you for participating! We hope you enjoyed the problems!
A. Crush-ed
Author: VS-Codes
The problem statement is simple enough: every hour you can do all tasks up to difficulty $$$k$$$, and you need to maximize the time spent; however, you must perform a task every hour. So it is always optimal to start with the easiest task (lowest difficulty), do all tasks of that difficulty, and gradually move up. The answer (maximum time spent) is the total number of unique difficulties present in the array.
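For example, if the task difficulties are $$$[3, 1, 3, 2]$$$ (an illustrative array, not one of the samples), the distinct difficulties are $$$\{1, 2, 3\}$$$, so the maximum time spent is $$$3$$$ hours: one hour for difficulty $$$1$$$, one for $$$2$$$ and one for $$$3$$$.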
#include <bits/stdc++.h>
#define int long long
#define endl "\n"
#define FastIO ios_base::sync_with_stdio(false); cin.tie(NULL);
using namespace std;

void solve()
{
    int n;
    cin >> n;
    set<int> s;
    for (int i = 0; i < n; i++)
    {
        int x;
        cin >> x;
        s.insert(x);
    }
    cout << s.size() << endl;
}

signed main()
{
    FastIO;
    // #ifndef ONLINE_JUDGE
    // freopen("input.txt", "r", stdin);
    // freopen("output.txt", "w", stdout);
    // #endif
    int t = 1;
    cin >> t;
    for (int i = 1; i <= t; i++)
    {
        // cout << "Case #" << i << ": ";
        solve();
    }
    return 0;
}
B. Yet Another Binary String Matching Problem
Author: VS-Codes
Let’s analyze all the possible cases when we pick any two indices:
- $$$s[i]=0$$$ & $$$s[j]=1$$$ : Bitwise AND gives $$$0$$$ and bitwise XOR gives $$$1$$$, so we can either keep the two values as they are or swap them. This means we can swap the bits at any two indices holding different values.
- $$$s[i]=1$$$ & $$$s[j]=0$$$ : Same as the case above.
- $$$s[i]=0$$$ & $$$s[j]=0$$$ : Both bitwise AND and bitwise XOR give $$$0$$$, so this operation has no effect on the two values.
- $$$s[i]=1$$$ & $$$s[j]=1$$$ : Bitwise AND gives $$$1$$$ while bitwise XOR gives $$$0$$$, so we can flip a set bit (convert a ‘1’ to a ‘0’) whenever the string has at least $$$2$$$ set bits.
So the operations boil down to two primitives: we can swap two different bits, or we can flip a set bit to $$$0$$$ if there are at least $$$2$$$ set bits in the string.
We count the number of set bits in strings $$$s$$$ and $$$t$$$; let them be $$$cnt_s$$$ and $$$cnt_t$$$ respectively. If $$$cnt_s < cnt_t$$$, the answer is $$$NO$$$, as no operation can turn an unset bit into a set bit, so there is no way to increase the set-bit count of $$$s$$$.
If the set-bit counts of both strings are equal, the answer is obviously $$$YES$$$, as we can rearrange the bits of $$$s$$$ into $$$t$$$ using swaps.
Finally, when $$$cnt_s > cnt_t$$$, the answer is $$$YES$$$ (we can always decrease the set-bit count of $$$s$$$ one bit at a time and then rearrange), except when $$$cnt_t = 0$$$: it is impossible to bring the set-bit count of $$$s$$$ down to $$$0$$$ while it is still positive.
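For example (strings chosen purely for illustration, not from the samples): if $$$s = 1101$$$ and $$$t = 0111$$$ then $$$cnt_s = cnt_t = 3$$$ and swaps alone suffice, so the answer is $$$YES$$$; if $$$s = 1101$$$ and $$$t = 0100$$$ then $$$cnt_s = 3 > cnt_t = 1 > 0$$$, so we flip two set bits away and rearrange, again $$$YES$$$; if $$$s = 10$$$ and $$$t = 00$$$ then $$$cnt_t = 0$$$ while $$$cnt_s > 0$$$, so the answer is $$$NO$$$.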
#include <bits/stdc++.h>
#define int long long
#define endl "\n"
#define FastIO ios_base::sync_with_stdio(false); cin.tie(NULL);
using namespace std;

void solve()
{
    int n;
    cin >> n;
    string s, t;
    cin >> s >> t;
    int cnt = 0, cntt = 0;
    for (auto &it : s)
    {
        if (it == '1')
            cnt++;
    }
    for (auto &it : t)
    {
        if (it == '1')
            cntt++;
    }
    if (cnt < cntt || (cnt > 0 && cntt == 0))
    {
        cout << "No" << endl;
        return;
    }
    cout << "Yes" << endl;
}

signed main()
{
    FastIO;
    #ifndef ONLINE_JUDGE
    freopen("input.txt", "r", stdin);
    freopen("output.txt", "w", stdout);
    #endif
    int t = 1;
    cin >> t;
    for (int i = 1; i <= t; i++)
    {
        // cout << "Case #" << i << ": ";
        solve();
    }
    return 0;
}
C. Good Subarrays
Author: Yomapeed
Let $$$f(l)$$$ be the sum of costs of all good subarrays starting at $$$l$$$. The answer will be the sum of $$$f(l)$$$ for all $$$1 \leq l \leq n$$$.
Consider the following definitions:
- $$$g(l, r) = l + (l + 1) + (l + 2) + \ldots + r = \frac{(r + l) \cdot (r - l + 1)}{2}$$$.
- $$$next(l) = $$$ the maximum position $$$r$$$ such that the subarray $$$A[l \ldots r]$$$ is good.
- $$$h(l, r) = \sum_{x = l}^{r} a_x \cdot (r - x + 1) = \left(\sum_{x = l}^{r} a_x\right) \cdot (r + 1) - \sum_{x = l}^{r} x \cdot a_x$$$.
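As a quick numeric check of the last identity (with illustrative values $$$a_2 = 2$$$, $$$a_3 = 5$$$, $$$a_4 = 1$$$): directly, $$$h(2, 4) = 2 \cdot 3 + 5 \cdot 2 + 1 \cdot 1 = 17$$$, and the right-hand side gives $$$(2 + 5 + 1) \cdot 5 - (2 \cdot 2 + 3 \cdot 5 + 4 \cdot 1) = 40 - 23 = 17$$$ as well.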
We will calculate $$$f(l)$$$ using the contribution technique, i.e., the contribution of each element in our answer.
For an array $$$A$$$ of length $$$n$$$ of distinct elements, $$$A = (A_1, A_2, \ldots, A_n)$$$, observe that the contribution of element $$$i$$$ over all prefixes is given by $$$C(i) = A_i \cdot (i + (i + 1) + (i + 2) + \ldots + n) = A_i \cdot g(i, n)$$$.
Thus, for all subarrays starting at $$$l$$$, contribution $$$C(x) = A_x \cdot g(x - l + 1, \text{next}(l) - l + 1)$$$ for all $$$l \leq x \leq \text{next}(l)$$$.
Let us calculate the values of $$$f(l)$$$ from $$$l = n$$$ to $$$1$$$.
$$$f(n) = a_n$$$.
Let $$$r = next(l + 1)$$$. When we add a new element $$$a_l$$$ to the front, the contribution $$$C(x)$$$ changes from $$$a_x \cdot g(x - l, r - l)$$$ to $$$a_x \cdot g(x - l + 1, r - l + 1)$$$ as the length of the prefixes increases by $$$1$$$.
$$$\Delta C(x) = a_x \cdot (r - x + 1)$$$.
Let $$$r' = next(l)$$$.
$$$f(l) = f(l + 1) + \sum_{x = l + 1}^{r} \Delta C(x) + a_l \cdot g(1, r - l + 1) = f(l + 1) + h(l + 1, r) + a_l \cdot g(1, r - l + 1)$$$
If $$$r = r'$$$, we have correctly calculated the value of $$$f(l)$$$.
If $$$r \gt r'$$$, we have to change the contribution of the elements in the range $$$[l, r]$$$.
We have two cases:
- For elements in $$$(r', r]$$$, we subtract $$$\sum_{x = r' + 1}^{r} a_x \cdot g(x - l + 1, r - l + 1)$$$. Iterating naively works here, as every element is removed from some range at most once because $$$next(i) \leq next(i + 1)$$$.
- For elements in $$$[l, r']$$$, we subtract $$$\left(\sum_{x = l}^{r'} a_x\right) \cdot g(r' + 1 - l + 1, r - l + 1)$$$, which can be done in $$$\mathcal{O}(1)$$$ using prefix sums, or by maintaining the sum of the currently active segment.
Each value of $$$h(l, r)$$$ can be computed in $$$\mathcal{O}(1)$$$ after an $$$\mathcal{O}(n)$$$ prefix-sum precomputation, and $$$next(l)$$$ can be calculated in $$$\mathcal{O}(n \log n)$$$ using a map that stores the next occurrence of each element.
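A small self-contained sketch of this prefix-sum evaluation of $$$h$$$ (1-indexed, modular reduction omitted; the array values and names here are purely illustrative, and the full solution below folds the same idea into its getSum lambda):
#include <bits/stdc++.h>
using namespace std;
using ll = long long;
// h(l, r) = sum_{x=l..r} a[x] * (r - x + 1)
//         = (sum_{x=l..r} a[x]) * (r + 1) - sum_{x=l..r} x * a[x]
int main() {
    vector<ll> a = {0, 0, 2, 5, 1};        // 1-indexed; a[2..4] = {2, 5, 1}
    int n = (int)a.size() - 1;
    vector<ll> p(n + 1, 0), px(n + 1, 0);  // prefix sums of a[x] and of x * a[x]
    for (int x = 1; x <= n; x++) {
        p[x] = p[x - 1] + a[x];
        px[x] = px[x - 1] + x * a[x];
    }
    auto h = [&](int l, int r) -> ll {
        if (l > r) return 0LL;
        return (p[r] - p[l - 1]) * (r + 1) - (px[r] - px[l - 1]);
    };
    cout << h(2, 4) << "\n";               // prints 17, matching the check above
    return 0;
}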
Time Complexity $$$\mathcal{O}(n \log n)$$$.
#include <bits/stdc++.h>
using namespace std;
#pragma GCC optimize("Ofast")
/*#pragma GCC target("sse,sse2,sse3,ssse3,sse4,avx,avx2")*/
#pragma GCC optimization ("O3")
#pragma GCC optimization ("unroll-loops")
#define INF 999999999999999999
#define pb push_back
#define rep(i, a, b) for(int i = a; i < (b); ++i)
#define all(x) begin(x), end(x)
#define sz(x) (int)(x).size()
typedef pair<int, int> pii;
typedef vector<int> vi;
#define ff first
#define ss second
#define printclock cerr<<"Time : "<<1000*(long double)clock()/(long double)CLOCKS_PER_SEC<<"ms\n";
#define ll long long
#define fast ios_base::sync_with_stdio(false); cin.tie(NULL); cout.tie(NULL);
#include <ext/pb_ds/assoc_container.hpp>
using namespace __gnu_pbds;
typedef tree<ll, null_type, less<ll>, rb_tree_tag, tree_order_statistics_node_update> indexed_set;
mt19937_64 rng(chrono::steady_clock::now().time_since_epoch().count());
ll rand(ll B) { return (unsigned ll)rng() % B; }
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

const int mod = 1e9 + 7;

ll f(ll n) {
    return n * (n + 1) / 2;
}

// g(l, r) = l + (l + 1) + ... + r (taken modulo mod)
ll g(ll l, ll r) {
    return ((r - l + 1) * (r + l) / 2) % mod;
}

int main() {
    // freopen("input.txt", "r", stdin);
    // freopen("output.txt", "w", stdout);
    fast;
    ll _T = 1;
    cin >> _T;
    ll sum = 0;
    while (_T--) {
        ll n;
        cin >> n;
        sum += n;
        vector<ll> v(n);
        for (int i = 0; i < n; i++) {
            cin >> v[i];
        }
        // prefix sums of v[x] and of x * v[x], used to evaluate h(l, r) in O(1)
        vector<ll> p = v;
        for (int i = 1; i < n; i++) {
            p[i] += p[i - 1];
        }
        vector<ll> px = v;
        for (int i = 0; i < n; i++) {
            px[i] = i * v[i];
        }
        for (int i = 1; i < n; i++) {
            px[i] += px[i - 1];
        }
        auto getSum = [&](ll l, ll r) {
            if (l > r) return 0LL;
            return (p[r] - p[l - 1]) * (r + 1) - (px[r] - px[l - 1]);
        };
        map<ll, ll> nxt;
        ll r = n - 1, ans = 0, tot = 0, seg_sum = 0;
        for (int l = n - 1; l >= 0; l--) {
            ll npos = r;
            if (nxt.find(v[l]) != nxt.end()) {
                npos = nxt[v[l]] - 1;
            }
            ans += getSum(l + 1, r) % mod;
            ans += v[l] * g(1, r - l + 1) % mod;
            seg_sum += v[l];
            ans %= mod;
            // elements in (npos, r] no longer belong to any good subarray starting at l
            for (int j = r; j > npos; j--) {
                seg_sum -= v[j];
                ans -= v[j] * g(j - l + 1, r - l + 1) % mod;
                ans %= mod;
            }
            seg_sum %= mod;
            if (npos < r) {
                ans -= seg_sum * g(npos + 1 - l + 1, r - l + 1) % mod;
                ans %= mod;
            }
            r = min(r, npos);
            ans %= mod;
            if (ans < 0) ans += mod;
            tot += ans;
            tot %= mod;
            nxt[v[l]] = l;
        }
        cout << tot << "\n";
    }
    return 0;
}
D1. Valley Dominoes (Easy Version)
Author: twoplusthree
We are required to count the number of valley subsequences of the array $$$h$$$. To do this, we can take the following approach — for every index $$$1 \leq q \leq n$$$, we count the total number of valley subsequences in which $$$h_q$$$ is the smallest element. It is clear that the part of the subsequence to the left of $$$h_q$$$ must be strictly decreasing, and the part to the right must be strictly increasing.
Define $$$dpfd[i]$$$ as the number of strictly increasing subsequences starting at $$$i$$$. The transition for $$$dpfd$$$ is then given by — $$$dpfd[i] = 1 + \sum_{j > i,\, h_j > h_i} dpfd[j]$$$.
Similarly, we define $$$dpbk[i]$$$ as the number of strictly decreasing subsequences ending at $$$i$$$. The transition for $$$dpbk$$$ is given by — $$$dpbk[i] = 1 + \sum_{j < i,\, h_j > h_i} dpbk[j]$$$.
Both $$$dpfd$$$ and $$$dpbk$$$ can be computed naively in $$$\mathcal{O}(n^{2})$$$ time. The total number of valley subsequences is then given by — $$$\sum_{q = 1}^{n} dpbk[q] \cdot dpfd[q]$$$.
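For instance, for the illustrative array $$$h = [2, 1, 3]$$$, the recurrences above give $$$dpfd = [2, 2, 1]$$$ and $$$dpbk = [1, 2, 1]$$$, so the sum evaluates to $$$1 \cdot 2 + 2 \cdot 2 + 1 \cdot 1 = 7$$$.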
Time Complexity: $$$\mathcal{O}(n^2)$$$
#include <bits/stdc++.h>
using namespace std;
using ll = long long;

const int _ = 2003;
const int MOD = 1e9 + 7;

int n, h[_];
int dpfd[_], dpbk[_];

int main() {
    int i, j;
    cin >> n;
    for (i = 1; i <= n; i++) {
        cin >> h[i];
        dpfd[i] = dpbk[i] = 0;
    }
    // dpfd[i]: strictly increasing subsequences starting at i
    for (i = n; i >= 1; i--) {
        dpfd[i] = 1;
        for (j = i + 1; j <= n; j++) {
            if (h[i] < h[j]) {
                dpfd[i] = (dpfd[i] + dpfd[j]) % MOD;
            }
        }
    }
    // dpbk[i]: strictly decreasing subsequences ending at i
    for (i = 1; i <= n; i++) {
        dpbk[i] = 1;
        for (j = i - 1; j >= 1; j--) {
            if (h[j] > h[i]) {
                dpbk[i] = (dpbk[i] + dpbk[j]) % MOD;
            }
        }
    }
    int ans = 0;
    for (i = 1; i <= n; i++) {
        ans = (ans + (ll)dpbk[i] * dpfd[i]) % MOD;
    }
    cout << ans << "\n";
    return 0;
}
D2. Valley Dominoes (Hard Version)
Author: twoplusthree
In this version of the problem, we can use the same approach as in D1, but we cannot afford to compute $$$dpfd$$$ and $$$dpbk$$$ in $$$\mathcal{O}(n^2)$$$ time. Instead, we can use a Fenwick Tree along with co-ordinate compression to optimise the time complexity down to $$$\mathcal{O}(n \cdot \log(n))$$$, which passes the constraints. Refer to this article for a detailed explanation of the underlying idea.
Time Complexity: $$$\mathcal{O}(n \cdot \log(n))$$$
#include <bits/stdc++.h>
using namespace std;
using ll = long long;

const int _ = 100005;
const int MOD = 1e9 + 7;

int n, h[_];
int dpfd[_], dpbk[_];

struct FenwickTree {
    vector<int> bit;
    int n;
    FenwickTree(int n) {
        this->n = n + 1;
        bit.assign(n + 1, 0);
    }
    int sum(int idx) {
        int ret = 0;
        for (++idx; idx > 0; idx -= idx & -idx) {
            ret = (ret + bit[idx]) % MOD;
        }
        return ret;
    }
    int sum(int l, int r) {
        return (MOD + sum(r) - sum(l - 1)) % MOD;
    }
    void add(int idx, int delta) {
        for (++idx; idx < n; idx += idx & -idx) {
            bit[idx] = (bit[idx] + delta) % MOD;
        }
    }
};

int main() {
    int i, j;
    cin >> n;
    map<int, int> cmp;
    for (i = 1; i <= n; i++) {
        cin >> h[i];
        cmp[h[i]];
        dpfd[i] = dpbk[i] = 0;
    }
    int cnt = 0;
    for (auto &x : cmp) {
        x.second = ++cnt;
    }
    FenwickTree tfd(cnt + 3);
    for (i = n; i >= 1; i--) {
        dpfd[i] = (1 + tfd.sum(cmp[h[i]] + 1, cnt)) % MOD;
        tfd.add(cmp[h[i]], dpfd[i]);
    }
    FenwickTree tbk(cnt + 3);
    for (i = 1; i <= n; i++) {
        dpbk[i] = (1 + tbk.sum(cmp[h[i]] + 1, cnt)) % MOD;
        tbk.add(cmp[h[i]], dpbk[i]);
    }
    int ans = 0;
    for (i = 1; i <= n; i++) {
        ans = (ans + (ll)dpbk[i] * dpfd[i]) % MOD;
    }
    cout << ans << "\n";
    return 0;
}
E. Stack Queries
Author: beedle
When an element is pushed onto the stack, observe the new size of the stack. For each possible stack size, store:
- the time instants at which a push results in this stack size, and
- the value pushed at each such time instant.
For a query asking for the $$$n^{th}$$$ stack element at the $$$t^{th}$$$ time instant, we simply binary search over the time instants stored for stack size $$$n$$$ to find the last element pushed at or before time instant $$$t$$$.
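For example, suppose the operations are Push 5, Push 7, Pop, Push 9 (an illustrative sequence, not one of the samples). The stored lists are $$$pos[1] = [(1, 5)]$$$ and $$$pos[2] = [(2, 7), (4, 9)]$$$, and the stack sizes after each instant are $$$[1, 2, 1, 2]$$$. Asking for the $$$2^{nd}$$$ stack element at time instant $$$2$$$ searches $$$pos[2]$$$ for the last push at time $$$\leq 2$$$ and returns $$$7$$$; the same query at time instant $$$4$$$ returns $$$9$$$.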
#include <bits/stdc++.h>
#define fast \
    ios_base::sync_with_stdio (false); \
    cin.tie (NULL); \
    cout.tie (NULL)
using namespace std;

signed main()
{
    fast;
    int n, q;
    cin >> n >> q;
    vector<pair<int, int>> pos[n + 1];
    vector<int> s(n + 1);
    stack<int> st;
    for (int i = 1; i <= n; i++)
    {
        string str;
        cin >> str;
        if (str == "Push")
        {
            int x;
            cin >> x;
            st.push(x);
            pos[st.size()].push_back({i, x});
        }
        else
        {
            st.pop();
        }
        s[i] = st.size();
    }
    int last = 0;
    while (q--)
    {
        int i, j;
        cin >> i >> j;
        i = (i + last) % n + 1;
        j = (j + last) % s[i] + 1;
        int loc = s[i] - j + 1;
        auto itr = lower_bound(pos[loc].begin(), pos[loc].end(), make_pair(i + 1, 0));
        itr--;
        last = itr->second;
        cout << last << "\n";
    }
    return 0;
}
F. Step One Missing
Author: twoplusthree
Observe that the answer is independent of the starting positions of Alice and Bob. It depends only upon the difference ($$$b - a$$$) between their current position co-ordinates. At any move, one can decrease this difference by $$$0$$$, $$$2$$$ or $$$3$$$, constrained by their last move. If the difference becomes $$$0$$$ or negative, the game ends.
Let us consider that $$$dif$$$ is the current difference, $$$mylast$$$ is the last move played by the player whose turn it is currently and $$$hislast$$$ is the last move of their opponent. We define a boolean function $$$f(dif, mylast, hislast)$$$ which is $$$1$$$ iff the given game state is winning, otherwise it is $$$0$$$.
From the conditions given in the problem:
- $$$f(0,mylast,hislast) = 0$$$; (opponent landed on the player's spot on the previous turn)
- $$$f(dif,mylast,hislast) = 1$$$ if $$$dif < 0$$$; (opponent crossed the player's spot on the previous turn)
$$$f(dif, mylast, hislast)$$$ can be recursively defined as follows:
If there exists any $$$move \in \lbrace 0, 2, 3 \rbrace$$$ such that $$$move \neq mylast$$$ and $$$f(dif - move, hislast, move) = 0$$$, then it is $$$1$$$, since it is always optimal to play that move.
Otherwise, it is $$$0$$$.
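For instance, $$$f(2, -1, -1) = 1$$$: playing the move $$$2$$$ makes the difference $$$0$$$ and leaves the opponent in the losing state $$$f(0, -1, 2) = 0$$$. Similarly, $$$f(3, -1, -1) = 1$$$.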
Our answer for every test case is given by $$$f(b - a, -1, -1)$$$ (any invalid move value may be used in place of $$$-1$$$). We memoise $$$f$$$ to pass the time constraints. Note that $$$f$$$ is independent of the starting positions, so the memoisation table must be shared across all test cases; recomputing $$$f$$$ from scratch for every test case will get a TLE verdict.
Time Complexity: $$$\mathcal{O}(N)$$$ over all test cases, where $$$N$$$ is the maximum value of $$$b - a$$$
#include <bits/stdc++.h>
using namespace std;

const int _ = 100003;

int a, b;
int dp[_][4][4];
bool ok[_][4][4];

bool f(int dif, int mylast, int hislast) {
    if (dif == 0) {
        return 0;
    } else if (dif < 0) {
        return 1;
    }
    if (ok[dif][mylast][hislast]) {
        return dp[dif][mylast][hislast];
    }
    int i;
    bool res = 0;
    for (i = 0; i <= 3; i++) {
        if (i == 1 || i == mylast) { continue; }
        res |= !f(dif - i, hislast, i);
    }
    ok[dif][mylast][hislast] = 1;
    return dp[dif][mylast][hislast] = res;
}

void solve() {
    cin >> a >> b;
    bool winning = f(b - a, 1, 1);
    cout << (winning ? "Alice" : "Bob") << "\n";
}

int main() {
    int tt;
    cin >> tt;
    memset(ok, 0, sizeof(ok));
    while (tt--) {
        solve();
    }
    return 0;
}
G. Gringotts
Author: twoplusthree
Let $$$a$$$ and $$$b$$$ be coprime integers such that $$$a$$$ Sickles are exactly equal in weight to $$$b$$$ Galleons. Hence, $$$a \cdot s = b \cdot g \implies \frac{a}{b} = \frac{g}{s}$$$ which indicates that $$$a$$$ and $$$b$$$ are the numerator and denominator of the reduced form of $$$\frac{g}{s}$$$.
Since the net weight of the coins must remain unchanged, we are only allowed to replace $$$ka$$$ Sickles with $$$kb$$$ Galleons, where $$$k$$$ is a whole number. If $$$17 \cdot b > a$$$, it is optimal to replace as many Sickles as possible, for which $$$k = \lfloor \frac{n}{a} \rfloor$$$. Otherwise the answer is $$$n$$$.
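For example (illustrative values): if a Sickle weighs $$$s = 2$$$, a Galleon weighs $$$g = 3$$$ and we hold $$$n = 10$$$ Sickles, then $$$a = 3$$$ and $$$b = 2$$$, i.e. $$$3$$$ Sickles weigh as much as $$$2$$$ Galleons. Since $$$17 \cdot 2 > 3$$$, we take $$$k = \lfloor \frac{10}{3} \rfloor = 3$$$ and exchange $$$9$$$ Sickles for $$$6$$$ Galleons, giving a value of $$$6 \cdot 17 + 1 = 103$$$ (measured in Sickles, as in the solution below) instead of $$$10$$$.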
Time Complexity: $$$\mathcal{O}(\log(\min(s, g)))$$$ per test case
#include <bits/stdc++.h>
using namespace std;
using ll = long long;

int s, g, n;

void solve() {
    cin >> s >> g >> n;
    int x = gcd(s, g);
    int a = g / x;
    int b = s / x;
    int k = n / a;
    ll ans = max((ll)n, (ll)k * b * 17 + n % a);
    cout << ans << "\n";
}

int main() {
    int tt;
    cin >> tt;
    while (tt--) {
        solve();
    }
    return 0;
}
H. Fine De
Author: twoplusthree
Let us consider that the capacity of Upayan's bag is $$$b$$$ books. Let $$$[x_0, x_1, \ldots, x_{n - 1}]$$$ be the order in which we choose to return the books (first we return the book at index $$$x_0$$$, then $$$x_1$$$, and so on). Hence, the total fine imposed is — $$$F(x, b) = \sum_{i = 0}^{n - 1} \left( l_{x_i} + p_{x_i} \cdot \left( \left\lfloor \frac{i}{b} \right\rfloor + 1 \right) \right)$$$.
Claim 1: It is always optimal to return the books sorted in the order of descending $$$p_i$$$.
Let $$$p_{x_i} < p_{x_j}$$$ for some $$$i < j$$$. Now, $$$i < j \implies \lfloor \frac{i}{b} \rfloor \leq \lfloor \frac{j}{b} \rfloor$$$. Hence, swapping $$$x_i$$$ and $$$x_j$$$ changes $$$F$$$ by $$$(p_{x_j} - p_{x_i}) \cdot \left( \lfloor \frac{i}{b} \rfloor - \lfloor \frac{j}{b} \rfloor \right) \leq 0$$$.
Thus, it never hurts (and may help) to swap $$$x_i$$$ and $$$x_j$$$, so returning the books in descending order of $$$p_i$$$ minimises $$$F$$$.
Claim 2: $$$F$$$ is non-increasing in $$$b$$$.
Let $$$b_1 < b_2$$$. Then $$$\lfloor \frac{i}{b_1} \rfloor \geq \lfloor \frac{i}{b_2} \rfloor$$$. Now, since $$$p_{x_i} \geq 0$$$, every term of $$$F$$$ with capacity $$$b_1$$$ is at least the corresponding term with capacity $$$b_2$$$, so $$$F(x, b_1) \geq F(x, b_2)$$$.
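For example (illustrative values), with sorted $$$p = [5, 3, 2]$$$ and $$$b = 2$$$ the multipliers $$$\lfloor \frac{i}{b} \rfloor + 1$$$ for $$$i = 0, 1, 2$$$ are $$$1, 1, 2$$$, so the $$$p$$$-part of the fine is $$$5 \cdot 1 + 3 \cdot 1 + 2 \cdot 2 = 12$$$; with $$$b = 1$$$ the multipliers become $$$1, 2, 3$$$ and the same part grows to $$$5 + 6 + 6 = 17$$$, illustrating Claim 2.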
Thus, we can use Binary Search to find the minimum value of $$$b$$$ ($$$1 \leq b \leq n$$$), for which $$$F(x, b) \leq r$$$. If there exists no such $$$b$$$, the answer is $$$-1$$$.
Time Complexity: $$$\mathcal{O}(n \cdot \log(n))$$$
#include <bits/stdc++.h>
using namespace std;
using ll = long long;

const int _ = 1000003;

int n, l[_], p[_];
ll lsum, r;

// returns true if the total fine with bag capacity k (books returned in descending order of p) does not exceed r
bool ok(int k) {
    int i;
    ll pot = lsum;
    for (i = 0; i < n; i++) {
        pot += (ll)p[i] * (i / k + 1);
    }
    return (pot <= r);
}

int main() {
    int i;
    cin >> n >> r;
    lsum = 0;
    for (i = 0; i < n; i++) {
        cin >> l[i];
        lsum += l[i];
    }
    for (i = 0; i < n; i++) {
        cin >> p[i];
    }
    sort(p, p + n, [](int x, int y) {
        return (x > y);
    });
    int ans = -1;
    int lo, hi, mid;
    lo = 1;
    hi = n;
    while (lo <= hi) {
        mid = (lo + hi) / 2;
        if (ok(mid)) {
            ans = mid;
            hi = mid - 1;
        } else {
            lo = mid + 1;
        }
    }
    cout << ans << "\n";
    return 0;
}
I. Queen
Author: om_mittal7
A quick observation reduces this problem to trivial if-else cases. A queen never needs more than $$$2$$$ moves to get from any square of the chessboard to any other, as it can always move vertically to reach the Rth row in the first move and then horizontally to reach the Cth column in the second move.
The answer is zero iff the queen is already at (R, C). The queen needs exactly one move iff (R, C) lies on the same diagonal, row or column as the queen. In all other cases, 2 moves are needed.
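For example (purely illustrative coordinates), a queen at $$$(1, 1)$$$ reaches $$$(1, 5)$$$ or $$$(4, 4)$$$ in one move (a shared coordinate and a shared diagonal respectively), while $$$(2, 4)$$$ needs two moves since neither coordinate matches and $$$|2 - 1| \neq |4 - 1|$$$.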
#include <bits/stdc++.h>
using namespace std;

void solve()
{
    int n, x1, y1, x2, y2;
    cin >> n >> x1 >> y1 >> x2 >> y2;
    if (x1 == x2 && y1 == y2)
        cout << "0\n";
    else if ((x1 == x2) || (y1 == y2) || (abs(x2 - x1) == abs(y2 - y1)))
        cout << "1\n";
    else
        cout << "2\n";
}

signed main()
{
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    int t = 1;
    cin >> t;
    for (int i = 1; i <= t; i++)
        solve();
    return 0;
}
J. Plag Incoming
Author: Anupam_Ghosh
TLE Elimination Step: We are asked to count the pairs satisfying $$$(arr[j] - arr[i])^2 = (j - i)$$$. Notice that the LHS is always a perfect square, so for the equality to hold, the RHS must also be a perfect square. Hence it suffices to check only pairs where $$$(j - i)$$$ is a perfect square: for every fixed index $$$i$$$, we iterate over the indices of the form $$$i + 1, i + 4, i + 9, i + 16, \ldots$$$ (while $$$i + j \cdot j < n$$$) and check the condition. Since only $$$\mathcal{O}(\sqrt{n})$$$ candidate indices are checked for each $$$i$$$, the total work is $$$\mathcal{O}(n \sqrt{n})$$$.
#include <bits/stdc++.h>
using namespace std;
#define int long long
#define FastIO ios_base::sync_with_stdio(false); cin.tie(NULL);

signed main() {
    FastIO;
    int n;
    cin >> n;
    int arr[n];
    for (int i = 0; i < n; i++) {
        cin >> arr[i];
    }
    int count = 0;
    // for every i, only offsets that are perfect squares (1, 4, 9, ...) can satisfy the condition
    for (int i = 0; i < n; i++) {
        int j = 1;
        while (i + j * j < n) {
            if ((arr[i + j * j] - arr[i]) * (arr[i + j * j] - arr[i]) == (j * j)) {
                count++;
            }
            j++;
        }
    }
    cout << count;
    return 0;
}
K. Zombies Are Fun
Author: Anupam_Ghosh
We use a min-heap priority queue to store the zombies' health in ascending order. We also keep track of the total (stored) health of all alive zombies and the total damage dealt so far. The priority queue allows efficient removal of zombies whose health is less than or equal to the total damage so far.
For an event of type 1, we increase the total damage by $$$x$$$ and pop every zombie whose stored health is $$$\leq$$$ the total damage so far, subtracting its health from the total sum.
For an event of type 2, we push the new zombie with stored health $$$(y + \text{total damage so far})$$$ and add this value to the total sum.
For an event of type 3, the remaining total health of all alive zombies is the total sum minus the total damage multiplied by the number of alive zombies (the size of the priority queue), which we print.
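For example (illustrative numbers), if the total damage so far is $$$4$$$ and a zombie with health $$$3$$$ appears, it is stored as $$$7$$$; if the total damage later grows to $$$6$$$, its remaining health is $$$7 - 6 = 1$$$, and it is popped from the heap only once the total damage reaches $$$7$$$.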
#include <bits/stdc++.h>
using namespace std;
#define int long long
const int N = 1e5 + 1;

signed main() {
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    int n, q;
    cin >> n >> q;
    int t;
    long long sum = 0;                                  // total stored health of all alive zombies
    priority_queue<int, vector<int>, greater<int>> pq;  // min-heap of stored healths
    for (int i = 0; i < n; i++) {
        cin >> t;
        sum += t;
        pq.push(t);
    }
    int damage = 0;                                     // total damage dealt so far
    while (q--) {
        cin >> t;
        if (t == 1) {
            int x;
            cin >> x;
            damage += x;
            // pop zombies whose stored health is already covered by the total damage
            while (pq.size() && pq.top() <= damage) {
                sum -= pq.top();
                pq.pop();
            }
        }
        if (t == 2) {
            int y;
            cin >> y;
            sum += (damage + y);  // store the health offset by the damage dealt so far
            pq.push(damage + y);
        }
        if (t == 3) {
            int res = sum - damage * pq.size();
            cout << res << '\n';
        }
    }
    return 0;
}
L. Square Trees
Author: DrearyJoke
Observe that there are only $$$p = 15$$$ primes till $$$50$$$. Therefore, every number can be represented by a $$$p$$$-bit bitmask where the $$$i^{th}$$$ bit is $$$1$$$ if the power of the $$$i^{th}$$$ prime is odd in the number. The mask of the product of two numbers is simply the $$$\oplus$$$ (bitwise XOR) of their masks, and a perfect square has mask $$$0$$$. Henceforth, $$$A_v$$$ will be used to refer to the mask of node $$$v$$$.
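For example, $$$12 = 2^2 \cdot 3$$$ and $$$27 = 3^3$$$ both have only the bit corresponding to the prime $$$3$$$ set in their masks; the XOR of the two masks is $$$0$$$, and indeed $$$12 \cdot 27 = 324 = 18^2$$$ is a perfect square.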
We root the tree arbitrarily and define $$$dp[v][m]$$$ as the number of subgraphs which contain $$$v$$$ as the highest node in the rooted tree and have mask $$$m$$$. Then, the answer is $$$\sum_{v = 1}^{n} dp[v][0]$$$. In order to calculate $$$dp[v]$$$, let's define $$$dp_i[v][m]$$$ as the number of ways to take $$$v$$$ itself and $$$0$$$ or more nodes from the subtrees of the first $$$i$$$ children of $$$v$$$ so that the nodes form a connected subgraph and their XOR is $$$m$$$. $$$dp_0[v][m]$$$ is $$$1$$$ for $$$m = A_v$$$ and $$$0$$$ otherwise. Then, $$$dp_i[v][m]$$$ can be calculated as $$$dp_i[v][m] = dp_{i - 1}[v][m] + \sum_{m' = 0}^{M} dp_{i - 1}[v][m \oplus m'] \cdot dp[c_i][m']$$$,
where $$$c_i$$$ is the $$$i^{th}$$$ child of $$$v$$$ and $$$M = 2^p - 1$$$ is the maximum possible mask.
This has complexity $$$O(n \cdot 2^{2p})$$$, which is too slow. But it can be seen that the transition from $$$dp_{i - 1}[v]$$$ to $$$dp_{i}[v]$$$ is simply an XOR convolution of $$$dp_{i - 1}[v]$$$ with $$$dp[c_i]$$$ (after incrementing $$$dp[c_i][0]$$$ by $$$1$$$ to allow taking nothing from that child, as the code below does), which can be calculated with FWHT in $$$O(p \cdot 2^p)$$$ time, speeding the solution up to $$$O(n \cdot p \cdot 2^p)$$$, which is fast enough.
#include <bits/stdc++.h>
using namespace std;
using ll = long long;
using pii = pair<int, int>;
using pll = pair<ll, ll>;
#define ff first
#define ss second
#define pb push_back
#define mp make_pair
#define END "\n"
#define rall(v) (v).rbegin(), (v).rend()
#define all(v) (v).begin(), (v).end()
#define fastio ios_base::sync_with_stdio(false); cin.tie(NULL); cout.tie(NULL);
#include <ext/pb_ds/assoc_container.hpp>
using namespace __gnu_pbds;
template <typename T>
using ordered_set =
    tree<T, null_type, less<T>, rb_tree_tag, tree_order_statistics_node_update>;

#define rep(i, from, to) for (int i = from; i < (to); ++i)
#define all(x) x.begin(), x.end()
#define sz(x) (int)(x).size()
typedef long long ll;
typedef pair<int, int> pii;
const ll mod = 998244353; // change to something else

ll euclid(ll a, ll b, ll &x, ll &y) {
    if (!b) return x = 1, y = 0, a;
    ll d = euclid(b, a % b, y, x);
    return y -= a / b * x, d;
}

struct Mod {
    ll x;
    Mod(ll xx = 0) : x(xx) {}
    Mod operator+(Mod b) { return Mod((x + b.x) % mod); }
    Mod operator-(Mod b) { return Mod((x - b.x + mod) % mod); }
    Mod operator*(Mod b) { return Mod((x * b.x) % mod); }
    Mod operator/(Mod b) { return *this * invert(b); }
    Mod invert(Mod a) {
        ll x, y, g = euclid(a.x, mod, x, y);
        assert(g == 1); return Mod((x + mod) % mod);
    }
    Mod operator^(ll e) {
        if (!e) return Mod(1);
        Mod r = *this ^ (e / 2); r = r * r;
        return e & 1 ? *this * r : r;
    }
};
typedef vector<Mod> vi;

void FST(vi& a, bool inv) {
    for (int n = sz(a), step = 1; step < n; step *= 2) {
        for (int i = 0; i < n; i += 2 * step) rep(j, i, i + step) {
            Mod &u = a[j], &v = a[j + step]; tie(u, v) =
                // inv ? pii(v - u, u) : pii(v, u + v); // AND
                // inv ? pii(v, u - v) : pii(u + v, u); // OR /// include-line
                pair<Mod, Mod>(u + v, u - v); // XOR /// include-line
        }
    }
    if (inv) for (Mod& x : a) x = x / sz(a); // XOR only /// include-line
}

vi conv(vi a, vi b) {
    FST(a, 0); FST(b, 0);
    rep(i, 0, sz(a)) a[i] = a[i] * b[i];
    FST(a, 1); return a;
}

vector<int> primes;
const int maxN = 50;

void do_primes() {
    for (int i = 2; i <= maxN; ++i) {
        bool ok = true;
        for (int j = 2; j * j <= i; ++j) {
            if (i % j == 0) {
                ok = false;
                break;
            }
        }
        if (ok) primes.push_back(i);
    }
}

int prime_mask(int a) {
    int mask = 0;
    for (int i = 0; i < primes.size(); ++i) {
        while (a % primes[i] == 0) mask ^= (1 << i), a /= primes[i];
    }
    return mask;
}

void solve() {
    int n;
    cin >> n;
    vector<vector<int>> adj(n + 1);
    for (int i = 1; i < n; ++i) {
        int a, b;
        cin >> a >> b;
        --a, --b;
        adj[a].push_back(b);
        adj[b].push_back(a);
    }
    vector<int> A(n);
    for (int i = 0; i < n; ++i) {
        cin >> A[i];
        A[i] = prime_mask(A[i]);
    }
    const int L = 1 << primes.size();
    vector<vi> dp(n, vi(L, 0));
    Mod ans = 0;
    auto dfs = [&](auto&& self, int v, int p) -> void {
        dp[v][A[v]].x = 1;
        for (auto u : adj[v]) {
            if (u != p) {
                self(self, u, v);
                dp[v] = conv(dp[v], dp[u]);
            }
        }
        ans = ans + dp[v][0];
        dp[v][0] = dp[v][0] + 1;
    };
    dfs(dfs, 0, -1);
    cout << ans.x << '\n';
}

int main()
{
    do_primes();
    fastio
    int t = 1;
    // cin >> t;
    while (t--) solve();
    return 0;
}