Thank you for participating!
A.Hasan and Binary Function
We can obtain the represented number from the binary representation as follows: if the string is $$$s_0 s_1 \ldots s_{n-1}$$$ (most significant bit first), its value equals $$$\sum_{i=0}^{n-1} s_i \cdot 2^{\,n-1-i}$$$.
We also know the fact that $$$2^i > 2^{i-1} + 2^{i-2} + \dots + 2^0$$$, so the highest set bit dominates all lower bits together.
Hence, if the highest active bit, i.e. the most significant $$$\mathtt{1}$$$, is at position $$$i$$$ (counting from the least significant bit), the number is at least $$$2^i$$$ and less than $$$2^{i+1}$$$,
thus the answer is exactly that position. For example, for $$$s = \mathtt{10110}$$$ (so $$$n = 5$$$) the answer is $$$4$$$. Complexity: $$$O(n)$$$.
for _ in range(int(input())):
    n = int(input())
    s = input()
    ans = 0
    for i in range(n):
        if s[i] == '1':
            ans = n - i - 1
            break
    print(ans)
B.Hasan and Beautiful !
Maintain two arrays $$$b$$$ and $$$c$$$ such that $$$b_i = \left\lfloor \frac{a_i}{k} \right\rfloor$$$ and $$$c_i = a_i \cdot k$$$.
Now, maintain a function $$$f$$$ that computes the maximum subarray sum in $$$O(n)$$$ for both arrays $$$b$$$ and $$$c$$$ (Kadane's algorithm, for example); be careful with the initialization so that the chosen subarray is non-empty. The answer is $$$\max(f(b), f(c))$$$. Complexity: $$$O(n)$$$.
def f(a):
    best = a[0]
    sum = a[0]
    for i in range(1, len(a)):
        sum = max(a[i], sum + a[i])
        best = max(best, sum)
    return best

for _ in range(int(input())):
    n, k = list(map(int, input().split()))
    a = list(map(int, input().split()))
    b = a.copy()
    c = a.copy()
    for i in range(n):
        b[i] //= k
        c[i] *= k
    print(max(f(b), f(c)))
C.Hasan and Special Remove
One way to solve the problem is to try every subarray of length $$$k$$$: slide a window of length $$$k$$$ over the array, which gives $$$n - k + 1$$$ candidate windows, and recompute the number of distinct values among the remaining elements for each window from scratch. This can reach $$$O(n^2)$$$ per test case, which is unfortunately not efficient enough, so we should optimize. We can do so with a frequency map: build a map (dict) with the count of every value, greedily remove the first $$$k$$$ elements from it, and then slide the window across the array, at each step returning the element that leaves the window to the map and removing the element that enters it, keeping track of the minimum number of distinct values over all windows. The total complexity is $$$O(n \log n)$$$ when using ordered maps (expected $$$O(n)$$$ with a hash map / Python dict).
def solve():
    n, k = map(int, input().split())
    a = list(map(int, input().split()))
    dic = {}
    for num in a:
        if num not in dic: dic[num] = 0
        dic[num] += 1
    # remove the first window of length k
    for i in range(k):
        dic[a[i]] -= 1
        if dic[a[i]] == 0: del dic[a[i]]
    ans = len(dic)
    # slide the window: give back the element that leaves, remove the one that enters
    for i in range(k, n):
        if a[i - k] not in dic: dic[a[i - k]] = 0
        dic[a[i - k]] += 1
        dic[a[i]] -= 1
        if dic[a[i]] == 0: del dic[a[i]]
        ans = min(ans, len(dic))
    print(ans)

for _ in range(int(input())):
    solve()
D.Hasan and Permutations
Let's see, for each index $$$i$$$ of the permutation $$$p_n$$$, what the count of possible cases for it is: we can use $$$(n-i)$$$ integers, thus for each index $$$i$$$ the count is $$$2^{n-i}$$$. The answer is therefore the sum of the counts over all indices which, due to the exclusion of the last index, works out to $$$(2^n - n - 1) \bmod (10^9 + 7)$$$. Complexity: $$$O(\log n)$$$ per test case (binary exponentiation).
m = 10**9 + 7

def p(a, b, m):
    a %= m
    r = 1
    while b > 0:
        if b & 1:
            r = r * a % m
        a = a * a % m
        b >>= 1
    return r

def ff(n):
    return (p(2, n, m) - n - 1) % m

for _ in range(int(input())):
    n = int(input())
    print(ff(n))
E.Square Triangle
The given condition is $$$x \le i \le j \le y$$$, which implies $$$i \le j$$$, so cells with $$$i > j$$$ never contribute and we can set them to $$$0$$$. Now, what if we take the sum of $$$g[i][j]$$$ over all $$$x \le i, j \le y$$$? The cells that do not satisfy $$$i \le j$$$ have been set to $$$0$$$, so they do not affect the answer. Thus it is enough to output, per query, the sum of the new grid's sub-grid from $$$(x, x)$$$ to $$$(y, y)$$$, which can be done with 2D prefix sums.
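As a concrete illustration, here is a minimal Python sketch of the approach described above, assuming the grid $$$g$$$ from the statement is stored 0-indexed and the queries $$$(x, y)$$$ are 1-indexed; the helper names build_prefix and query are purely illustrative. The reference C++ solution below arrives at the same sums through a slightly different prefix-sum decomposition.
def build_prefix(g):
    n = len(g)
    # pre[i][j] = sum of the kept cells in rows 0..i-1 and columns 0..j-1,
    # where a cell (i, j) is kept only when i <= j (otherwise it counts as 0)
    pre = [[0] * (n + 1) for _ in range(n + 1)]
    for i in range(n):
        for j in range(n):
            kept = g[i][j] if i <= j else 0
            pre[i + 1][j + 1] = kept + pre[i][j + 1] + pre[i + 1][j] - pre[i][j]
    return pre

def query(pre, x, y):
    # sum of the kept cells in the sub-grid (x, x)..(y, y), 1-indexed, in O(1)
    return pre[y][y] - pre[x - 1][y] - pre[y][x - 1] + pre[x - 1][x - 1]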
#include<bits/stdc++.h>
using namespace std;
long long cbz(int fx,int fy,int tx,int ty,vector<vector<long long>> &bsum){
    if(fx>tx || fy>ty){return 0;}
    if(fx==0 && fy == 0){return bsum[tx][ty];}
    else if(fx==0){
        return bsum[tx][ty]-bsum[tx][fy-1];
    }
    else if(fy==0){
        return bsum[tx][ty]-bsum[fx-1][ty];
    }
    else{
        return bsum[tx][ty]-bsum[tx][fy-1]-bsum[fx-1][ty]+bsum[fx-1][fy-1];
    }
}
int main(){
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int n,q;
    cin >> n >> q;
    vector<vector<long long>> a(n,vector<long long>(n,0));
    for(int i=0;i<n;i++){
        for(int j=0;j<n;j++){
            cin >> a[i][j];
        }
    }
    auto rs=a;
    for(int i=0;i<n;i++){
        for(int j=1;j<n;j++){rs[i][j]+=rs[i][j-1];}
    }
    for(int i=1;i<n;i++){
        for(int j=0;j<n;j++){rs[i][j]+=rs[i-1][j];}
    }
    auto ts=a;
    for(int i=1;i<n;i++){
        for(int j=0;j<n;j++){ts[i][j]+=ts[i-1][j];}
    }
    for(int j=1;j<n;j++){
        for(int i=1;i<n;i++){
            ts[i][j]+=ts[i-1][j-1];
        }
    }
    // for(auto &nx : ts){
    //     for(auto &ny : nx){
    //         cout << ny << " ";
    //     }cout << "\n";
    // }
    while(q>0){
        q--;
        int l,r;
        cin >> l >> r;
        l--; r--;
        long long res=ts[r][r];
        // cout << res << "\n";
        if(l>0){
            res-=cbz(0,l,l-1,r,rs);
            res-=ts[l-1][l-1];
        }
        cout << res << "\n";
    }
    return 0;
}
F.Behruzbek and XOR Queries
Let's say $$$x = 0$$$ for all the queries. Then we can sort all the queries by $$$y$$$ and set up a segment tree whose combining operation is XOR. Going from the smallest $$$y$$$ upward, we take every array element whose value is $$$\le y$$$ and set it in the segment tree; after that, each query is a simple range-XOR query on the segment tree.
What if that's not the case and $$$x > 0$$$? Then the answer for a query l r x y is (l r 0 x-1) ^ (l r 0 y), both of which can be processed using the method above. In total we have to process at most $$$2q$$$ queries of the type l r 0 y.
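For reference, here is a minimal Python sketch of that offline sweep, assuming 1-indexed queries $$$(l, r, x, y)$$$ that ask for the XOR of all $$$a_i$$$ with $$$l \le i \le r$$$ and $$$x \le a_i \le y$$$; the names XorBIT and solve_offline are purely illustrative. It uses a Fenwick tree under XOR rather than a segment tree, which works because XOR is its own inverse.
class XorBIT:
    def __init__(self, n):
        self.n = n
        self.t = [0] * (n + 1)
    def update(self, i, v):          # XOR v into position i (1-indexed)
        while i <= self.n:
            self.t[i] ^= v
            i += i & -i
    def prefix(self, i):             # XOR of positions 1..i
        s = 0
        while i > 0:
            s ^= self.t[i]
            i -= i & -i
        return s
    def range_xor(self, l, r):       # XOR of positions l..r (XOR is self-inverse)
        return self.prefix(r) ^ self.prefix(l - 1)

def solve_offline(a, queries):
    n = len(a)
    # type-0 events insert array values, type-1 events evaluate a query threshold;
    # sorting puts inserts before the queries that share the same value
    events = [(v, 0, i + 1) for i, v in enumerate(a)]
    for idx, (l, r, x, y) in enumerate(queries):
        events.append((x - 1, 1, idx))
        events.append((y, 1, idx))
    events.sort()
    bit = XorBIT(n)
    ans = [0] * len(queries)
    for val, typ, pos in events:
        if typ == 0:
            bit.update(pos, val)                # activate a[pos] (it was 0 before)
        else:
            l, r, _, _ = queries[pos]
            ans[pos] ^= bit.range_xor(l, r)     # once for threshold x-1, once for y
    return ans
The reference C++ implementation below performs the same sweep with the AtCoder Library segment tree.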
#include<bits/stdc++.h>
#include <algorithm>
#include <cassert>
#include <functional>
#include <vector>
#ifdef _MSC_VER
#include <intrin.h>
#endif
#if __cplusplus >= 202002L
#include <bit>
#endif
namespace atcoder {
namespace internal {
#if __cplusplus >= 202002L
using std::bit_ceil;
#else
unsigned int bit_ceil(unsigned int n) {
unsigned int x = 1;
while (x < (unsigned int)(n)) x *= 2;
return x;
}
#endif
int countr_zero(unsigned int n) {
#ifdef _MSC_VER
unsigned long index;
_BitScanForward(&index, n);
return index;
#else
return __builtin_ctz(n);
#endif
}
constexpr int countr_zero_constexpr(unsigned int n) {
int x = 0;
while (!(n & (1 << x))) x++;
return x;
}
} // namespace internal
} // namespace atcoder
namespace atcoder {
#if __cplusplus >= 201703L
template <class S, auto op, auto e> struct segtree {
static_assert(std::is_convertible_v<decltype(op), std::function<S(S, S)>>,
"op must work as S(S, S)");
static_assert(std::is_convertible_v<decltype(e), std::function<S()>>,
"e must work as S()");
#else
template <class S, S (*op)(S, S), S (*e)()> struct segtree {
#endif
public:
segtree() : segtree(0) {}
explicit segtree(int n) : segtree(std::vector<S>(n, e())) {}
explicit segtree(const std::vector<S>& v) : _n(int(v.size())) {
size = (int)internal::bit_ceil((unsigned int)(_n));
log = internal::countr_zero((unsigned int)size);
d = std::vector<S>(2 * size, e());
for (int i = 0; i < _n; i++) d[size + i] = v[i];
for (int i = size - 1; i >= 1; i--) {
update(i);
}
}
void set(int p, S x) {
assert(0 <= p && p < _n);
p += size;
d[p] = x;
for (int i = 1; i <= log; i++) update(p >> i);
}
S get(int p) const {
assert(0 <= p && p < _n);
return d[p + size];
}
S prod(int l, int r) const {
assert(0 <= l && l <= r && r <= _n);
S sml = e(), smr = e();
l += size;
r += size;
while (l < r) {
if (l & 1) sml = op(sml, d[l++]);
if (r & 1) smr = op(d[--r], smr);
l >>= 1;
r >>= 1;
}
return op(sml, smr);
}
S all_prod() const { return d[1]; }
template <bool (*f)(S)> int max_right(int l) const {
return max_right(l, [](S x) { return f(x); });
}
template <class F> int max_right(int l, F f) const {
assert(0 <= l && l <= _n);
assert(f(e()));
if (l == _n) return _n;
l += size;
S sm = e();
do {
while (l % 2 == 0) l >>= 1;
if (!f(op(sm, d[l]))) {
while (l < size) {
l = (2 * l);
if (f(op(sm, d[l]))) {
sm = op(sm, d[l]);
l++;
}
}
return l - size;
}
sm = op(sm, d[l]);
l++;
} while ((l & -l) != l);
return _n;
}
template <bool (*f)(S)> int min_left(int r) const {
return min_left(r, [](S x) { return f(x); });
}
template <class F> int min_left(int r, F f) const {
assert(0 <= r && r <= _n);
assert(f(e()));
if (r == 0) return 0;
r += size;
S sm = e();
do {
r--;
while (r > 1 && (r % 2)) r >>= 1;
if (!f(op(d[r], sm))) {
while (r < size) {
r = (2 * r + 1);
if (f(op(d[r], sm))) {
sm = op(d[r], sm);
r--;
}
}
return r + 1 - size;
}
sm = op(d[r], sm);
} while ((r & -r) != r);
return 0;
}
private:
int _n, size, log;
std::vector<S> d;
void update(int k) { d[k] = op(d[2 * k], d[2 * k + 1]); }
};
} // namespace atcoder
using namespace std;
using namespace atcoder;
long long op(long long x,long long y){
return (x^y);
}
long long e(){
return 0;
}
int main(){
ios::sync_with_stdio(false);
cin.tie(nullptr);
int t;
cin >> t;
while(t>0){
t--;
long long n,q;
cin >> n >> q;
vector<array<long long,3>> ev;
for(long long i=0;i<n;i++){
long long a;
cin >> a;
ev.push_back({a,0,i});
}
vector<int> l(q),r(q);
vector<long long> res(q);
for(int i=0;i<q;i++){
cin >> l[i] >> r[i];
l[i]--;
long long x,y;
cin >> x >> y;
ev.push_back({x-1,1,i});
ev.push_back({y,1,i});
}
sort(ev.begin(),ev.end());
segtree<long long,op,e> seg(n);
for(auto &nx : ev){
// cerr << nx[0] << " " << nx[1] << " " << nx[2] << "\n";
if(nx[1]==0){
seg.set(nx[2],nx[0]);
}
else{
res[nx[2]]^=seg.prod(l[nx[2]],r[nx[2]]);
}
}
for(auto &nx : res){cout << nx << "\n";}
}
return 0;
}
G.Hasan and Fair Split
Notice that the set of prime factors shared between the numerator and the denominator is the same, and in the numerator each shared prime has at least the same power as in the denominator. We are aiming to minimize the product, so we need to make $$$a$$$ divisible by $$$b$$$ (for each prime separately); hence, whenever some prime has an even power, the best we can do is split it into two equal parts.
Calculate a sieve for all numbers up to $$$2 \cdot 10^5$$$; this can be achieved in $$$O(n \log(\log(n)))$$$, which is fast. Then find the prime factorization of $$$n!$$$, i.e. write $$$n! = \prod_i p_i^{\alpha_i}$$$.
For each prime factor $$$p_i$$$, the power $$$\alpha_i$$$ can be calculated with Legendre's formula $$$\alpha_i = \sum_{k \ge 1} \left\lfloor \frac{n}{p_i^{k}} \right\rfloor$$$. For example, the power of $$$2$$$ in $$$10!$$$ is $$$\lfloor 10/2 \rfloor + \lfloor 10/4 \rfloor + \lfloor 10/8 \rfloor = 5 + 2 + 1 = 8$$$.
It can be shown that this sum has only $$$O(\log(n))$$$ non-zero terms, i.e. no more than about $$$20$$$ iterations per prime, thus the full prime factorization of $$$n!$$$ takes $$$O(20p)$$$, where $$$p$$$ is the number of primes up to $$$n$$$.
Now we want to have $$$a \bmod b = 0$$$ (for the fraction $$$\frac{a}{b}$$$), so a productive greedy is, for each $$$p_i$$$ with power $$$\alpha_i$$$, the following: $$$a$$$ gets the power $$$\displaystyle \left \lceil \frac{\alpha_i}{2} \right \rceil$$$ and $$$b$$$ gets the power $$$\displaystyle \left \lfloor \frac{\alpha_i}{2} \right \rfloor$$$.
Therefore we only need to look at the primes with $$$\alpha_i \bmod 2 = 1$$$, i.e. odd powers, and the answer is $$$\displaystyle \prod_{\alpha_i \text{ odd}} p_i \bmod (10^9 + 7)$$$.
For each test case we compute, for every prime up to $$$n$$$, its power in $$$n!$$$. There are approximately $$$O \left ( \frac{n}{\log(n)} \right )$$$ primes up to $$$n$$$ and each power takes $$$O(\log(n))$$$ to evaluate (even less in practice), so the prime factorization of $$$n!$$$ takes at most about $$$O(n)$$$ time. Whenever a prime's final power is odd we multiply that prime into the answer and take the result modulo $$$10^9 + 7$$$. The total complexity over multiple test cases is thus $$$O(8 \cdot 10^5)$$$ for the precalculation of the sieve (primes) and $$$O(\sum n)$$$ for answering the test cases.
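As a compact illustration, here is a minimal Python sketch of the variant with the sieve precomputed once, assuming $$$n \le 2 \cdot 10^5$$$ in every test case; the name answer is purely illustrative.
MOD = 10 ** 9 + 7
LIMIT = 2 * 10 ** 5

# Sieve of Eratosthenes, computed once for all test cases.
is_prime = [True] * (LIMIT + 1)
is_prime[0] = is_prime[1] = False
for i in range(2, int(LIMIT ** 0.5) + 1):
    if is_prime[i]:
        for j in range(i * i, LIMIT + 1, i):
            is_prime[j] = False
primes = [i for i in range(2, LIMIT + 1) if is_prime[i]]

def answer(n):
    ans = 1
    for p in primes:
        if p > n:
            break
        # Legendre's formula: the power of p in n! is the sum of floor(n / p^k)
        power, pk = 0, p
        while pk <= n:
            power += n // pk
            pk *= p
        if power % 2 == 1:           # only primes with an odd power contribute
            ans = ans * p % MOD
    return ans

for _ in range(int(input())):
    print(answer(int(input())))
The reference solution below rebuilds the sieve inside each test case instead of precomputing it.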
def solve():
    x = int(input())
    ip = [1] * (x + 1)
    ip[0] = ip[1] = 0
    for i in range(2, int(x**0.5) + 1):
        if ip[i]:
            for j in range(i * i, x + 1, i):
                ip[j] = 0
    p = [0] * (x + 1)
    for i in range(2, x + 1):
        if ip[i]:
            cnt = 0
            j = i
            while j <= x:
                cnt += x // j
                if j > x // i: break
                j *= i
            p[i] = cnt
    ans = 1
    MOD = 10**9 + 7
    for i in range(2, x + 1):
        if p[i] % 2:
            ans = (ans * i) % MOD
    print(ans)

for _ in range(int(input())):
    solve()
H.Insertion
This can be done with a simple treap; or offline, using an ordered set / segment tree and recovering the array while handling the queries in reverse; or with sqrt decomposition, as in the solution below.
#include<bits/stdc++.h>
using namespace std;
#define SZ 250
vector<vector<long long>> dat={{}};
int main(){
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int q;
    cin >> q;
    while(q>0){
        q--;
        int typ;
        cin >> typ;
        if(typ==1){
            long long i,x;
            cin >> i >> x;
            bool rb=false;
            for(auto &nx : dat){
                if(i<=nx.size()){
                    nx.insert(nx.begin()+i,x);
                    if(nx.size()>2*SZ){rb=true;}
                    break;
                }
                i-=nx.size();
            }
            if(rb){
                // rebuild
                vector<long long> v;
                for(auto &nx : dat){
                    for(auto &ny : nx){v.push_back(ny);}
                    nx.clear();
                }
                dat.clear();
                int vl=v.size();
                dat.resize((vl/SZ)+1);
                for(int i=0;i<vl;i++){
                    dat[i/SZ].push_back(v[i]);
                }
            }
        }
        else{
            int i;
            cin >> i;
            i--;
            for(auto &nx : dat){
                if(i<nx.size()){
                    cout << nx[i] << "\n";
                    break;
                }
                i-=nx.size();
            }
        }
    }
    return 0;
}
I.Another queries?
This is a fairly direct centroid decomposition problem: precompute the answers for each centroid using prefix sums, then answer the queries in $$$O(\log n)$$$ or $$$O(\log^2 n)$$$ each, depending on the implementation, using the fact that $$$q(l, r) = q(1, r) - q(1, l-1)$$$.
// https://judge.yosupo.jp/submission/102080
#define PROBLEM "https://judge.yosupo.jp/problem/vertex_add_range_contour_sum_on_tree"
#include <iostream>
#include <algorithm>
#include <array>
#include <cassert>
#include <deque>
#include <iostream>
#include <queue>
#include <random>
#include <utility>
#include <vector>
namespace suisen {
template <typename T, T(*op)(T, T), T(*e)()>
struct PointAddRangeContourSumOnTree {
using value_type = T;
private:
struct InternalSegmentTree {
InternalSegmentTree(int n = 0) : _n(n), _dat(n + 1, e()) {}
template <typename InputIterator>
InternalSegmentTree(InputIterator first, InputIterator last) : _n(last - first), _dat(2 * _n, e()) {
for (int i = 0; i < _n; ++i) _dat[_n + i] = *first++;
for (int i = _n - 1; i > 0; --i) _dat[i] = op(_dat[2 * i], _dat[2 * i + 1]);
}
void add(int i, const value_type& val) {
for (i += _n; i; i >>= 1) _dat[i] = op(_dat[i], val);
}
value_type sum(int l, int r) const {
l = std::max(0, l), r = std::min(r, _n);
value_type res = e();
for (l += _n, r += _n; l < r; l >>= 1, r >>= 1) {
if (l & 1) res = op(res, _dat[l++]);
if (r & 1) res = op(res, _dat[--r]);
}
return res;
}
private:
int _n;
std::vector<value_type> _dat;
};
using sequence_type = InternalSegmentTree;
struct AuxInfo {
int8_t child_index;
int dep;
};
struct TreeNode {
std::vector<int> adj;
typename std::array<AuxInfo, 30>::iterator info_it;
value_type dat;
};
public:
PointAddRangeContourSumOnTree(int n = 0, const value_type& fill_value = e()) : PointAddRangeContourSumOnTree(std::vector<value_type>(n, fill_value)) {}
PointAddRangeContourSumOnTree(const std::vector<value_type>& a) : _n(a.size()), _nodes(_n), _par(2 * _n, -1), _info(_n), _subtrees(2 * _n), _ord(_n) {
for (int i = 0; i < _n; ++i) _nodes[i].dat = a[i];
}
void add_edge(int u, int v) {
_nodes[u].adj.push_back(v);
_nodes[v].adj.push_back(u);
}
// O(NlogN)
void build() {
std::mt19937 rng{ std::random_device{}() };
reorder(std::uniform_int_distribution<int>{ 0, _n - 1 }(rng));
int new_node = _n;
std::vector<int> sub_size(2 * _n, 0);
std::vector<int> ctr(2 * _n, -1);
std::vector<int> head(2 * _n), tail(2 * _n), link(2 * _n);
for (int i = 0; i < _n; ++i) head[i] = tail[i] = i;
std::vector<value_type> dat(_n);
auto rec = [&](auto rec, int r, int siz) -> int {
int c = -1;
auto get_centroid = [&](auto get_centroid, int u, int p) -> void {
sub_size[u] = 1;
for (int v : _nodes[u].adj) if (v != p) {
get_centroid(get_centroid, v, u);
if (v == c) {
sub_size[u] = siz - sub_size[c];
break;
}
sub_size[u] += sub_size[v];
}
if (c < 0 and sub_size[u] * 2 > siz) c = u;
};
get_centroid(get_centroid, r, -1);
for (int v : _nodes[c].adj) {
const int comp_size = sub_size[v];
_nodes[v].adj.erase(std::find(_nodes[v].adj.begin(), _nodes[v].adj.end(), c));
ctr[v] = rec(rec, v, comp_size);
sub_size[v] = comp_size;
}
auto comp = [&](int i, int j) { return sub_size[i] > sub_size[j]; };
std::priority_queue<int, std::vector<int>, decltype(comp)> pq{ comp };
for (int v : _nodes[c].adj) {
link[v] = -1;
pq.push(v);
}
auto build_sequence = [&, this](const int root_head, const bool child_index) {
std::deque<std::pair<int, int>> dq;
for (int root = root_head; root >= 0; root = link[root]) dq.emplace_back(root, -1);
value_type sum = e();
auto dat_it = dat.begin();
int nxt = -1;
while (dq.size()) {
const auto [u, pu] = dq.front();
dq.pop_front();
if (u == nxt) *dat_it++ = std::exchange(sum, e()), nxt = -1;
auto& node = _nodes[u];
*node.info_it++ = { child_index, int(dat_it - dat.begin()) };
sum = op(sum, node.dat);
for (int v : node.adj) if (v != pu) {
dq.emplace_back(v, u);
if (nxt < 0) nxt = v;
}
}
*dat_it++ = sum;
return sequence_type(dat.begin(), dat_it);
};
while (pq.size() >= 2) {
const int u = pq.top(); pq.pop();
const int v = pq.top(); pq.pop();
if (pq.empty()) {
_par[ctr[u]] = _par[ctr[v]] = c;
_subtrees[c][0] = build_sequence(head[u], 0);
_subtrees[c][1] = build_sequence(head[v], 1);
break;
}
sub_size[new_node] = sub_size[u] + sub_size[v];
ctr[new_node] = new_node;
_par[ctr[u]] = _par[ctr[v]] = new_node;
_subtrees[new_node][0] = build_sequence(head[u], 0);
_subtrees[new_node][1] = build_sequence(head[v], 1);
head[new_node] = head[u], tail[new_node] = tail[v], link[tail[u]] = head[v];
pq.push(new_node);
++new_node;
}
if (pq.size()) {
int u = pq.top(); pq.pop();
_par[ctr[u]] = c;
_subtrees[c][0] = build_sequence(head[u], 0);
}
for (int v : _nodes[c].adj) _nodes[v].adj.push_back(c);
return c;
};
rec(rec, 0, _n);
_par.resize(new_node), _par.shrink_to_fit();
_subtrees.resize(new_node), _subtrees.shrink_to_fit();
}
// O(1)
value_type get(int u) const {
u = _ord[u];
return _nodes[u].dat;
}
// O((logN)^2)
void add(int u, const value_type& val) {
u = _ord[u];
_nodes[u].dat = op(_nodes[u].dat, val);
int v = _par[u];
const auto it_end = _nodes[u].info_it;
for (auto it = _info[u].begin(); it != it_end; ++it) _subtrees[std::exchange(v, _par[v])][it->child_index].add(it->dep, val);
}
// O((logN)^2)
value_type sum(int u, int dl, int dr) const {
u = _ord[u];
value_type res = dl <= 0 and 0 < dr ? _nodes[u].dat : e();
res = op(res, _subtrees[u][0].sum(dl - 1, dr - 1));
res = op(res, _subtrees[u][1].sum(dl - 1, dr - 1));
int v = _par[u];
const auto it_end = _nodes[u].info_it;
for (auto it = _info[u].begin(); it != it_end; ++it) {
const int ql = dl - it->dep - 1, qr = dr - it->dep - 1;
if (v < _n and ql <= 0 and 0 < qr) res = op(res, _nodes[v].dat);
res = op(res, _subtrees[std::exchange(v, _par[v])][it->child_index ^ 1].sum(ql - 1, qr - 1));
}
return res;
}
private:
int _n;
std::vector<TreeNode> _nodes;
std::vector<int> _par;
std::vector<std::array<AuxInfo, 30>> _info;
std::vector<std::array<sequence_type, 2>> _subtrees;
std::vector<int> _ord;
void reorder(int s) {
_ord.assign(_n, -1);
int t = 0;
std::deque<int> dq{ s };
while (dq.size()) {
int u = dq.front(); dq.pop_front();
_ord[u] = t++;
for (int v : _nodes[u].adj) if (_ord[v] < 0) dq.push_back(v);
}
assert(t == _n);
std::vector<TreeNode> tmp(_n);
for (int i = 0; i < _n; ++i) {
for (int& elm : _nodes[i].adj) elm = _ord[elm];
_nodes[i].info_it = _info[_ord[i]].begin();
tmp[_ord[i]] = std::move(_nodes[i]);
}
_nodes.swap(tmp);
}
};
} // namespace suisen
long long op(long long x, long long y) {
return x + y;
}
long long e() {
return 0;
}
int main() {
std::ios::sync_with_stdio(false);
std::cin.tie(nullptr);
int n, q;
std::cin >> n >> q;
std::vector<long long> a(n);
for (auto &e : a) std::cin >> e;
suisen::PointAddRangeContourSumOnTree<long long, op, e> g(a);
for (int i = 0; i < n - 1; ++i) {
int u, v;
std::cin >> u >> v;
u--; v--;
g.add_edge(u, v);
}
g.build();
while (q --> 0) {
// int query_type;
// std::cin >> query_type;
// if (query_type == 0) {
// int p, x;
// std::cin >> p >> x;
// g.add(p, x);
// } else {
int p, l, r;
std::cin >> p >> l >> r;
p--;
r++;
std::cout << g.sum(p, l, r) << '\n';
// }
}
return 0;
}