idea: Imakf
Solution
Code
By Imakf
#include <bits/stdc++.h>
#define debug(...) fprintf(stderr ,__VA_ARGS__)
#define LL long long
const LL MOD = 1e9 + 7;
const int MX = 1e5 + 23;
int read(){
    char k = getchar(); LL x = 0;
    while(k < '0' || k > '9') k = getchar();
    while(k >= '0' && k <= '9') x = x * 10 + k - '0' ,k = getchar();
    return x;
}
int n ,a[MX];
void solve(){
    n = read();
    for(int i = 1 ; i <= n ; ++i) a[i] = read();
    // the answer is YES iff every element is divisible by a[1]
    int ok = 1;
    for(int i = 1 ; i <= n ; ++i){
        if(a[i] % a[1]) ok = false;
    }
    puts(ok ? "YES" : "NO");
}
int main(){
    int T = read();
    while(T--) solve();
    return 0;
}
idea: waaitg
Solution
Code
By Imakf
#include <bits/stdc++.h>
#define debug(...) fprintf(stderr ,__VA_ARGS__)
#define __FILE(x)\
freopen(#x".in" ,"r" ,stdin);\
freopen(#x".out" ,"w" ,stdout)
#define LL long long
const int MX = 1e5 + 23;
const LL MOD = 998244353;
int read(){
char k = getchar(); int x = 0;
while(k < '0' || k > '9') k = getchar();
while(k >= '0' && k <= '9') x = x * 10 + k - '0' ,k = getchar();
return x;
}
int a[MX];
void solve(){
int n = read() ,l = read() ,r = read();
int ok = 1;
for(int i = 1 ; i <= n ; ++i){
a[i] = ((l - 1) / i + 1) * i;
ok = ok && a[i] <= r;
}
if(ok){
puts("YES");
for(int i = 1 ; i <= n ; ++i)
printf("%d%c" ,a[i] ," \n"[i == n]);
}
else puts("NO");
}
int main(){
int T = read();
for(int i = 1 ; i <= T ; ++i){
solve();
}
return 0;
}
idea: Imakf
Solution
Code of solution 1
By Imakf
#include <bits/stdc++.h>
#define debug(...) fprintf(stderr ,__VA_ARGS__)
#define __FILE(x)\
freopen(#x".in" ,"r" ,stdin);\
freopen(#x".out" ,"w" ,stdout)
#define LL long long
const int MX = 5e5 + 23;
const LL MOD = 998244353;
int read(){
char k = getchar(); int x = 0;
while(k < '0' || k > '9') k = getchar();
while(k >= '0' && k <= '9') x = x * 10 + k - '0' ,k = getchar();
return x;
}
int n ,a[MX] ,q ,choose[MX];
bool check(int mid){
for(int i = 1 ; i <= n ; ++i) choose[i] = 0;
int iq = q;
for(int i = 1 ; i < mid ; ++i)
if(a[i] <= iq) choose[i] = 1;
for(int i = mid ; i <= n ; ++i){
choose[i] = 1;
if(iq == 0) return 0;
if(a[i] > iq) --iq;
}
if(iq < 0) return 0;
return 1;
}
void solve(){
n = read() ,q = read();
for(int i = 1 ; i <= n ; ++i)
a[i] = read();
int l = 1 ,r = n ,mid;
while(l <= r){
mid = (l + r) >> 1;
if(check(mid)) r = mid - 1;
else l = mid + 1;
}
// [r + 1 ,n] is selected
check(r + 1);
for(int i = 1 ; i <= n ; ++i)
printf("%d" ,choose[i]);
puts("");
}
int main(){
int T = read();
for(int i = 1 ; i <= T ; ++i){
solve();
}
return 0;
}
Code of solution 2
By waaitg
#include<cstdio>
int a[100005],b[100005];
int main(){
    int T;
    scanf("%d",&T);
    while(T--){
        int n,iq;
        scanf("%d%d",&n,&iq);
        for(int i=1;i<=n;++i)
            scanf("%d",&a[i]);
        int nq=0; // minimum IQ needed to test all chosen contests in the suffix i..n
        for(int i=n;i>=1;--i){
            if(a[i]<=nq)b[i]=1;        // easy with the IQ we already require here: test it for free
            else if(nq<iq)++nq,b[i]=1; // hard: test it and require one more point of IQ
            else b[i]=0;               // would need more IQ than we have, skip it
        }
        for(int i=1;i<=n;++i)
            printf("%d",b[i]);
        puts("");
    }
    return 0;
}
Unfortunately, a harder version of this problem has appeared in a CodeChef contest here.
In fact, this problem was created more than a year ago, earlier than the problem in that CodeChef contest. We are sorry again.
Solution
Code
By waaitg
#include<cstdio>
#include<cstring>
#include<iostream>
#include<algorithm>
#define ch() getchar()
#define pc(x) putchar(x)
using namespace std;
template<typename T>void read(T&x){
static char c;static int f;
for(c=ch(),f=1;c<'0'||c>'9';c=ch())if(c=='-')f=-f;
for(x=0;c>='0'&&c<='9';c=ch())x=x*10+(c&15);x*=f;
}
template<typename T>void write(T x){
static char q[65];int cnt=0;
if(x<0)pc('-'),x=-x;
q[++cnt]=x%10,x/=10;
while(x)
q[++cnt]=x%10,x/=10;
while(cnt)pc(q[cnt--]+'0');
}
const int maxn=100005;
int a[maxn];
int main(){
int T;read(T);
while(T--){
int n;read(n);
for(int i=1;i<=n;++i)read(a[i]);
for(int i=n-1;i>=1;--i){
int pre=a[i+1],ok=false;
for(int j=i;j>=1;--j){
ok=(a[j]==0);
pre-=(a[j]=pre-a[j]);
if(ok){
sort(a+j,a+i+1);
break;
}
}
if(!ok)
sort(a+1,a+i+1);
}
write(a[1]),pc('\n');
}
return 0;
}
/* Created by xiaolilsq. */
idea: waaitg
Solution
Code
By waaitg
#include<cstdio>
#include<cstring>
#include<iostream>
#include<algorithm>
#define ch() getchar()
#define pc(x) putchar(x)
using namespace std;
template<typename T>void read(T&x){
static char c;static int f;
for(c=ch(),f=1;c<'0'||c>'9';c=ch())if(c=='-')f=-f;
for(x=0;c>='0'&&c<='9';c=ch())x=x*10+(c&15);x*=f;
}
template<typename T>void write(T x){
static char q[65];int cnt=0;
if(x<0)pc('-'),x=-x;
q[++cnt]=x%10,x/=10;
while(x)
q[++cnt]=x%10,x/=10;
while(cnt)pc(q[cnt--]+'0');
}
const int maxn=100005,maxm=200005;
int par[maxn];
int AC(int x){
return par[x]==x?x:par[x]=AC(par[x]);
}
struct Edge{
int v,nt;
Edge(int v=0,int nt=0):
v(v),nt(nt){}
}e[maxn*2],es[maxm];
int hd[maxn],num;
void qwq(int u,int v){
e[++num]=Edge(v,hd[u]),hd[u]=num;
}
int dp[maxn];
int pa[maxn][20];
void dfs(int u,int p){
pa[u][0]=p;
for(int j=1;(1<<j)<=dp[u];++j)
pa[u][j]=pa[pa[u][j-1]][j-1];
for(int i=hd[u];i;i=e[i].nt){
int v=e[i].v;
if(v==p)continue;
dp[v]=dp[u]+1;dfs(v,u);
}
}
int jump(int x,int t){
for(int cn=0;t;t>>=1,++cn)
if(t&1)x=pa[x][cn];
return x;
}
int lca(int x,int y){
if(dp[x]>dp[y])x=jump(x,dp[x]-dp[y]);
if(dp[y]>dp[x])y=jump(y,dp[y]-dp[x]);
if(x==y)return x;
for(int t=19;t>=0;--t)
if(pa[x][t]!=pa[y][t])
x=pa[x][t],y=pa[y][t];
return pa[x][0];
}
int vis[maxn];
char s[maxn];
void pushdown(int u,int p){
if(vis[u]==0)s[u]='1';
else s[u]='0';
for(int i=hd[u];i;i=e[i].nt){
int v=e[i].v;
if(v==p)continue;
vis[v]+=vis[u];
pushdown(v,u);
}
}
int main(){
int n,m;
read(n),read(m);
for(int i=1;i<=n;++i)
par[i]=i;
int tp=0;
for(int i=1;i<=m;++i){
int u,v;
read(u),read(v);
if(AC(u)==AC(v)){
es[++tp]=Edge(u,v);
}
else{
par[AC(u)]=AC(v);
qwq(u,v),qwq(v,u);
}
}
int root=1;
dfs(root,0);
for(int i=1;i<=tp;++i){
int u=es[i].v,v=es[i].nt;
if(dp[u]>dp[v])u^=v^=u^=v;
int w=lca(u,v);
if(u==w){
--vis[v];
++vis[jump(v,dp[v]-dp[u]-1)];
}
else{
++vis[root];
--vis[u];
--vis[v];
}
}
pushdown(root,0);
s[n+1]='\0';
printf("%s\n",s+1);
return 0;
}
Solution
Code
By Imakf
#include <bits/stdc++.h>
#define debug(...) fprintf(stderr ,__VA_ARGS__)
#define __FILE(x)\
freopen(#x".in" ,"r" ,stdin);\
freopen(#x".out" ,"w" ,stdout)
#define LL long long
const int MX = 2000 + 23;
LL MOD;
using namespace std;
int read(){
char k = getchar(); int x = 0;
while(k < '0' || k > '9') k = getchar();
while(k >= '0' && k <= '9') x = x * 10 + k - '0' ,k = getchar();
return x;
}
int head[MX] ,tot = 1;
struct edge{
int node ,next;
}h[MX << 1];
void addedge(int u ,int v ,int flg = 1){
// if(flg) debug("%d %d\n" ,u ,v);
h[++tot] = (edge){v ,head[u]} ,head[u] = tot;
if(flg) addedge(v ,u ,false);
}
int n ,dp[MX][MX] ,S[MX][MX] ,suf[MX][MX] ,pre[MX][MX];
void dapai(int x ,int f){
int ch = 0;
for(int i = head[x] ,d ; i ; i = h[i].next){
if((d = h[i].node) == f) continue;
dapai(d ,x);
}
for(int i = head[x] ,d ; i ; i = h[i].next){
if((d = h[i].node) == f) continue;
++ch;
for(int j = 0 ; j <= n ; ++j){
suf[ch][j] = pre[ch][j] = S[d][j];
}
}
if(!ch){
for(int i = 1 ; i <= n ; ++i){
dp[x][i] = 1 % MOD;
S[x][i] = (S[x][i - 1] + dp[x][i]) % MOD;
}
return ;
}
for(int j = 0 ; j <= n ; ++j){
for(int i = 1 ; i <= ch ; ++i)
pre[i][j] = 1LL * pre[i][j] * pre[i - 1][j] % MOD;
for(int i = ch ; i >= 1 ; --i)
suf[i][j] = 1LL * suf[i][j] * suf[i + 1][j] % MOD;
}
for(int i = 1 ; i <= n ; ++i) dp[x][i] = pre[ch][i];
if(x != 1) for(int i = head[x] ,d ,c = 0 ; i ; i = h[i].next){
if((d = h[i].node) == f) continue;
++c;
LL sum = 0;
for(int mx = 1 ; mx <= n ; ++mx){
dp[x][mx] = (dp[x][mx] + sum * dp[d][mx]) % MOD;
sum = (sum + 1LL * pre[c - 1][mx] * suf[c + 1][mx]) % MOD;
}
}
for(int i = 1 ; i <= ch ; ++i)
for(int j = 0 ; j <= n ; ++j)
suf[i][j] = pre[i][j] = 1 % MOD;
for(int i = 1 ; i <= n ; ++i)
S[x][i] = (S[x][i - 1] + dp[x][i]) % MOD;
}
int C[MX][MX];
void init(){
for(int i = 0 ; i < MX ; ++i) C[i][0] = 1 % MOD;
for(int i = 1 ; i < MX ; ++i)
for(int j = 1 ; j < MX ; ++j)
C[i][j] = (C[i - 1][j - 1] + C[i - 1][j]) % MOD;
}
int main(){
n = read() ,MOD = read();
init();
for(int i = 0 ; i < MX ; ++i)
for(int j = 0 ; j < MX ; ++j)
suf[i][j] = pre[i][j] = 1 % MOD;
for(int i = 2 ; i <= n ; ++i){
addedge(read() ,read());
// addedge(rand() % (i - 1) + 1 ,i);
}
dapai(1 ,0);
for(int i = 1 ; i < n ; ++i){
LL ans = 0;
for(int j = 1 ; j <= i ; ++j){
ans += ((i - j) & 1 ? -1LL : 1LL) * C[i][j] * dp[1][j] % MOD;
}
ans = (ans % MOD + MOD) % MOD;
printf("%lld%c" ,ans ," \n"[i == n]);
}
return 0;
}
Solution
Code
By waaitg
#include<cstdio>
#include<cstring>
#include<iostream>
#include<algorithm>
#define ch() getchar()
#define pc(x) putchar(x)
using namespace std;
template<typename T>void read(T&x){
static char c;static int f;
for(f=1,c=ch();c<'0'||c>'9';c=ch())if(c=='-')f=-f;
for(x=0;c>='0'&&c<='9';c=ch()){x=x*10+(c&15);}x*=f;
}
template<typename T>void write(T x){
static char q[64];int cnt=0;
if(x==0)return pc('0'),void();
if(x<0)pc('-'),x=-x;
while(x)q[cnt++]=x%10+'0',x/=10;
while(cnt--)pc(q[cnt]);
}
const int maxn=100005,maxq=100005,maxt=34;
const int inf=0x3f3f3f3f;
int a[maxn];
struct Segment{
int l,r;
Segment(int l=inf,int r=-inf):
l(l),r(r){}
Segment operator * (const Segment o)const{
return Segment(min(l,o.l),max(r,o.r));
}
bool operator != (const Segment o)const{
return l!=o.l||r!=o.r;
}
void input(void){
read(l),read(r);
}
}sg[maxt][maxn],tr[maxn],qy[maxq];
int n;
void res(void){
for(int i=1;i<n;++i)
tr[i]=Segment();
}
void add(int p,Segment v){
while(p<n){
tr[p]=tr[p]*v;
p+=(p&(-p));
}
}
Segment ask(int p){
Segment re;
while(p){
re=re*tr[p];
p^=(p&(-p));
}
return re;
}
struct Edge{
int v,w,nt;
Edge(int v=0,int w=0,int nt=0):
v(v),w(w),nt(nt){}
}e[maxn];
int num,hd[maxn];
void ins(int u,int v,int w){
e[++num]=Edge(v,w,hd[u]),hd[u]=num;
}
int st[maxq],tp;
long long Ans[maxq];
int main(){
int q;read(n),read(q);
if(n==1){while(q--)puts("0");return 0;}
for(int i=1;i<=n;++i)read(a[i]);
for(int i=1;i<n;++i){
if(a[i]<=a[i+1])
sg[0][i]=Segment(a[i],a[i+1]);
else
sg[0][i]=Segment(a[i+1],a[i]);
}
for(int i=1;i<maxt;++i){
for(int j=1;j<n;++j){
if(sg[i-1][j].l>=sg[i-1][j].r)sg[i][j]=Segment();
else ins(sg[i-1][j].l,sg[i-1][j].r-1,j);
}
res();
for(int j=n-1;j>=1;--j){
add(j,sg[i-1][j]);
for(int k=hd[j];k;k=e[k].nt){
sg[i][e[k].w]=ask(e[k].v);
}
hd[j]=0;
}
num=0;
}
for(int i=1;i<=q;++i){
qy[i].input();
if(qy[i].l==qy[i].r)Ans[i]=-2;
else ins(qy[i].l,qy[i].r-1,i);
}
res();
for(int i=n-1;i>=1;--i){
add(i,sg[maxt-1][i]);
for(int j=hd[i];j;j=e[j].nt){
if(i==1&&e[j].v==n-1)
Ans[e[j].w]=-1;
else if(ask(e[j].v)!=Segment(1,n))
Ans[e[j].w]=-2;
else
st[++tp]=e[j].w;
}
hd[i]=0;
}
num=0;
for(int i=maxt-2;~i;--i){
for(int j=1;j<=tp;++j){
ins(qy[st[j]].l,qy[st[j]].r-1,st[j]);
}
res();
for(int j=n-1;j>=1;--j){
add(j,sg[i][j]);
for(int k=hd[j];k;k=e[k].nt){
Segment tmp=ask(e[k].v);
int id=e[k].w;
if(tmp!=Segment(1,n))
Ans[id]|=(1ll<<i),qy[id]=tmp;
}
hd[j]=0;
}
num=0;
}
for(int i=1;i<=q;++i)
write(++Ans[i]),pc('\n');
return 0;
}
This is a noun we learned almost one year ago, when this problem came out.
Solution
Code
By waaitg
#include<cstdio>
#include<cstring>
#include<iostream>
#include<algorithm>
#include<assert.h>
#define ch() getchar()
#define pc(x) putchar(x)
template<typename T>void read(T&x){
static char c;static int f;
for(c=ch(),f=1;c<'0'||c>'9';c=ch())if(c=='-')f=-f;
for(x=0;c>='0'&&c<='9';c=ch())x=x*10+(c&15);x*=f;
}
template<typename T>void write(T x){
static char q[65];int cnt=0;
if(x<0)pc('-'),x=-x;
q[++cnt]=x%10,x/=10;
while(x)
q[++cnt]=x%10,x/=10;
while(cnt)pc(q[cnt--]+'0');
}
const int maxn=20000005;
struct Count{
int pos,val;
Count(int pos=-1,int val=-1):
pos(pos),val(val){}
};
int plus(int x,int y){
return (x<0||y<0)?-1:x+y;
}
Count operator + (const Count A,const Count B){
int Ap=A.pos,Bp=B.pos,va=plus(A.val,B.val);
if(~Ap&&~Bp)return Ap^Bp?Count():Count(Ap|Bp,va);
if(~Ap||~Bp)return Count(Ap&Bp,va);
return Count(-1,va);
}
Count operator * (const Count A,const Count B){
int Ap=A.pos,Bp=B.pos,va=plus(A.val,B.val);
if(~Ap&&~Bp)return Count(Ap^Bp,va);
if(~Ap||~Bp)return Count(-1,va);
return Count(-1,plus(va,1));
}
int c[maxn];
int tn,Op[maxn],Cn[maxn];
Count dp[maxn];
int t,hb[maxn],rev[maxn];
void pushup(int p){
int l=p<<1,r=l|1;
dp[p]=((t&hb[p])?(dp[l]+dp[r]):(dp[l]*dp[r]));
}
int va1,sum;
void ins(int i){
int p=rev[(i&(tn-1))];
if(~c[i])Op[p]^=c[i];else ++Cn[p];
if(dp[p+tn].pos>0)--va1;else sum-=dp[p+tn].val;
dp[p+tn]=(Cn[p]?Count(-1,Cn[p]-1):Count(Op[p],0));
if(dp[p+tn].pos>0)++va1;else sum+=dp[p+tn].val;
}
void del(int i){
int p=rev[(i&(tn-1))];
if(~c[i])Op[p]^=c[i];else --Cn[p];
}
int power(int a,int x,int p){
if(x==-1)return 0;
int re=1;
while(x){
if(x&1)re=1ll*re*a%p;
a=1ll*a*a%p;x>>=1;
}
return re;
}
int main(){
int n,m,w;
read(n),read(m),read(t),read(w);
memset(c,-1,sizeof c);
for(int i=1;i<=m;++i){
int d;read(d);--d;read(c[d]);
}
tn=(n&(-n));
hb[0]=0;hb[1]=1;
for(int i=2;i<tn*2;++i)
hb[i]=(hb[i>>1]<<1);
int cn=-1;for(;(1<<(cn+1))<tn;++cn);
for(int i=1;i<tn;++i)
rev[i]=(rev[i>>1]>>1)|((i&1)<<cn);
for(int i=0;i<tn;++i)dp[i+tn].val=0;
for(int i=0;i<n;++i)ins(i);
if(t<tn)for(int i=tn-1;i>=1;--i)pushup(i);
int q;read(q);
while(q--){
int f;read(f);--f;
del(f);read(c[f]);ins(f);
int p;read(p);
if(t>=tn){
write(va1?0:power((1<<w)%p,sum,p));
}
else{
int now=(rev[(f&(tn-1))]|tn);while(now>>=1)pushup(now);
write(power((1<<w)%p,plus(dp[1].val,dp[1].pos==-1),p));
}
pc('\n');
}
return 0;
}
C was tough today :/
Yeah lol, but it was really enjoyable for me (even though I didn't solve it)
You are so cute ^^
Thanks
MOST CREATIVE QUESTION AFTER VERY LONG TIME THAT WAS AMAZING!!
From testing, I had a solution for E that works in $$$O(n \log n)$$$ assuming some claim that I guessed is true. Unfortunately, no one could prove its correctness nor find a counterexample. Here is the solution, hopefully someone can prove my claim :P
Let $$$A(l,r)$$$ be the answer for range $$$(l,r)$$$. Then it is easy to see that $$$A(l,r-1) \geq A(l,r)$$$ and $$$A(l+1,r) \geq A(l,r)$$$. Call $$$(l,r)$$$ important if both $$$A(l,r-1)>A(l,r)$$$ and $$$A(l+1,r) > A(l,r)$$$ are satisfied. We have implicitly found the entire $$$A$$$ if we know the important ranges and their respective answers.
Claim: there are $$$O(n)$$$ important ranges, more specifically the bound is $$$2n-3$$$.
I do not have a proof for this and just observed it from checking many arrays for small $$$n$$$. In fact, I don't think anyone was able to prove any bounds on the value of $$$A$$$ either.
Anyways, we can now easily solve the problem with 0-1 bfs in $$$O(n \log^2 n)$$$ since it is simple 2D DS. If we bash DS more, we can surprisingly make it $$$O(n \log n)$$$. The key idea is that in the DS where we want to know the value of $$$A(l,r)$$$, it suffices to check if $$$A(l,r)=k$$$ for some $$$k$$$ that is non-decreasing across queries, which allows us to cut the log.
Code: 164531480 (feel free to hack it)
I am able to prove a linear bound for it, but not able to prove the exact bound you mention.
Here is a brief sketch of the proof. Consider the important segments at a given level, say $$$lvl$$$. By important segments at a level $$$lvl$$$, I mean all the important segments for which the answer is equal to $$$lvl$$$. So for $$$lvl = 0$$$, $$$[0, n-1]$$$ is the only important segment, and so on.
Now, a small detour — instead of looking at segments like $$$[l,l+1,...r]$$$ look at them as $$$[l,l+1],\cdots,[r-1,r]$$$. So, when I say the endpoints of a segment, I mean $$$[l,l+1]$$$ and $$$[r-1,r]$$$. If the segment has just two points, then it has only one unique endpoint. Now, consider that we're at level $$$lvl+1$$$, and we want to find all the important segments at this level. Then consider the end-points of all the important segments at all the previous levels (endpoint defined as above). My claim is that
There can't be any important segment at $$$lvl+1$$$ that strictly contains any of these end-points. So, for any end-point, either it's outside of a segment at this level, or, it's the end-point for that segment, but it can't be strictly inside.
Segments at the same level can only share up to an end-point. i.e. they can either be disjoint, or touch each other at a point, or have a common end-point (have two points in common)
The proof of this claim is using induction. For the first part of the claim, say there was an important segment $$$[l',r']$$$ at this level strictly containing a previously occurred endpoint $$$[i,i+1]$$$. Then since this end-point must have been the end-point of some imp. segment previously, WLOG $$$[i, j]$$$ was the imp. segment at previous level. then, $$$f([i, j])$$$ must be either equal to the entire array, or it must contain some imp. segment $$$[a,b]$$$ at a previous level. Now, if its image contains some segment at a previous level, then notice that $$$f([i+1,j-1])$$$ must be strictly inside such a segment (not even touching the ends), more precisely, $$$f([i+1,j-1])\subseteq [a+1,b-1]$$$. If not, then there could have been a smaller segment containing this segment, which is a contradiction. Similar argument can be applied for when the image is equal to the entire array, as if $$$f([i+1,j-1])$$$ was touching any of the ends of the array, we would have had a smaller segment and the segment wouldn't be important.
Now, $$$r<j$$$ (as the contrary would imply $$$[l,r]$$$ contains $$$[i,j]$$$), so $$$r \in [i+1,j-1]$$$ and hence $$$f([r])$$$ falls strictly inside the segment $$$[a,b]$$$. Also, for $$$[l,r]$$$ to be an important segment at $$$lvl+1$$$, there must be a segment at previous level that is inside $$$f([l,r])$$$. But notice that by our induction hypothesis, any such segment must either be disjoint with $$$[a,b]$$$, or inside it completely, or outside it but touching $$$[a,b]$$$ at one end, or inside/outside it but intersecting with $$$[a,b]$$$ at exactly one endpoint. One can show that for all possible scenarios, either $$$f([l,i+1])$$$ contains this segment, or $$$f([i,r])$$$ contains it, and our induction hypothesis is true. Similar arguments can be applied to prove the second part of the claim.
Using this observation, we can deduce that each end-point, when introduced at any level, can be part of two segments (one to the left and one to the right). and finally, all end-points themselves can be imp. segments, giving an upper bound of roughly $$$3n-6$$$.
$$$a_i - a_{i-1}$$$ forces
It's been a while since there was a contest this stupidly difficult
wow, thanks for superfast editorial
Just listen to my absurd mistake in B: I thought both all $$$a[i]$$$ and all $$$gcd(i, a[i])$$$ should be different. When I realized my mistake, I solved the problem in like 1 minute. But before that, I spent more than one hour.
Same :"
Same
same
Same. And I didn't realize until now
Dropped about 150 rating :(
dropped 1 :(
same
Exact same thing! Dropped 116 rating points.
Same) I spent a couple of hours today solving this problem, then I gave up, read the editorial and I was like: "WTF, this won't work", and after 2-3 minutes I reread the problem and realized my mistake :(
Same, I don't know why it happened, I thought we had to take all unique a[i].
woah new lesson learned: brute force sometimes does work
I liked the idea of B. It is a good math problem for beginners.
B was easy, but many people thought that all a[i] must be different
I thought that too.
Fine contest. I love problem 1707A(div2. c) and 1707B(div2. d) because they are interesting to solve. Thank you.
Every single problem is great, but the whole problemset is disappointing.
Yes, exactly, it is too boring to have several problems with the same topic, especially ones like this.
"DFS Trees" is essentially formulated as "given spanning tree of a graph, find all vertices for which the tree is a DFS-tree". This task is kind of folklore, I guess... Another interesting variation would be to find all vertices for which the given tree is a BFS-tree.
You look very similar to one of the greats..."Dale steyn"
The MST detail ensures that we won't use an edge $$$(u, v)$$$ before using the tree edges on the path between $$$u$$$ and $$$v$$$. I think that makes the problem much easier. Does your solution work for any spanning tree?
It depends on how you formulate it... If you're given unordered adjacency lists and you need to check whether there exists some ordering such that the given spanning tree is a DFS tree, then the solution would be exactly the same, i.e. check that there are no cross edges for each starting vertex.
I have a hunch that it should be possible to modify the algorithm when you're given specifically ordered adjacency lists, but I feel like it's going to be a bit more tedious...
By the way, 102129F - Milliarium Aureum is formulated almost the same as DFS Trees, but asks to count the vertices for which the given tree is a widest-paths tree, rather than whether it is a DFS/BFS tree.
Able to solve only 1.
So was I.
In problem C solution 2, why is the greedy approach that she should test the contest since it will increase her iq valid? Since on one hand, it does increase her iq but also it makes her reach close to the limit q, beyond which she cannot test any contest.
You can use the same reasoning as in solution 1, that there is an optimal solution where all the skipping happens before the tests that decrease the IQ.
Div.2 C Was SICK
For Difference Array, I have another solution:
First we have an $$$O(n^2)$$$ simulation algorithm.
Notice that $$$n$$$ equal numbers generate $$$n-1$$$ zeros after one operation, so multiple copies of a value and a single copy of that value have the same effect (the only difference is the number of zeros at the beginning). Also, if a number $$$m>0$$$ is a sum of distinct non-negative numbers $$$a_i$$$, there can be at most $$$O(\sqrt m)$$$ distinct numbers among them. For this problem $$$m$$$ is $$$5\times 10^5$$$. So we can de-duplicate the original sequence; then only $$$O(\sqrt m)$$$ numbers are left, and running the simulation on them costs just $$$O(m)$$$.
can you send the code here
164517652
A bit ugly:)
Deleted
You say the complexity becomes O(m), how is this true? Shouldn't the simulation be run N times? You can't just remove the leading zeros and simulate, because they still affect your answer. Shouldn't the complexity be O(N * sqrt(m))?
You can make it so that you only have to deal with the unique values of the array, so no matter the number of zeros, they can be processed in O(1). It is run until there is only 1 unique value, and each time it is run the number of unique values, m, is decreased by a factor of sqrt(m) in the worst case. The overall time complexity is O(n + m + √m + √√m + √√√m ...) until √√√...m gets pretty small, I guess.
Why are we neglecting zeroes? For example, if we have a = [0,0,2,5], then neglecting the 0's gives [2,5] -> [3]. But the answer should rather be 1, as [0,0,2,5] -> [0,2,3] -> [1,2] -> [1]. Can you please explain?
I don't mean to say delete all 0s, but if there are many 0s, leave only one
In problem 2C/1A, why should we assume $$$Q = 0$$$ at first? Which is to say: why isn't starting with $$$Q \neq 0$$$ optimal?
That's what I hate about cf editorials. Such very important details are always omitted and it is considered to be ok.
I think we can reason like that.
Imagine a chart q = q(i). It is a staircase.
Lemma: among equal values, if you don't take some of them, the taken ones always have greater indices than the not-taken ones. Otherwise you can swap them in this way and get a result that is at least as good.
Now consider that in an optimal solution Q = x != 0. Look at the rightmost value you haven't taken. Take it. Obviously nothing is going to change on the left of the chart (from the taken value) because q remains the same there. What happens on the right? On the right all values are smaller than q by definition (you took the rightmost one), so you can still take them just the same even though q is reduced by 1. Now you have obtained a solution that is just as good with Q = x - 1. By induction you can reduce it to Q = 0. So it is enough to always consider only Q = 0.
Might be helpful for somebody who is/was struggling with B (alternative idea):
Just loop from 1 to n (let it be i) and for each i check whether L is divisible by i (L % i == 0); if so, ans[i] = L.
Otherwise ans[i] = L + (i - (L % i)); it is basically the nearest number to L (from above) that is divisible by i.
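For reference, a small self-contained C++ sketch of the loop just described (my own code, not the commenter's; it assumes the usual "t, then n l r per test" input format and is equivalent to the editorial's ((l - 1) / i + 1) * i formula):
#include <cstdio>
#include <vector>
// sketch only: variable names are mine, not from the comment above
int main(){
    int t;
    scanf("%d", &t);
    while(t--){
        long long n, l, r;
        scanf("%lld %lld %lld", &n, &l, &r);
        std::vector<long long> ans(n + 1);
        bool ok = true;
        for(long long i = 1; i <= n; ++i){
            // nearest number to l (from above) that is divisible by i
            ans[i] = (l % i == 0) ? l : l + (i - l % i);
            if(ans[i] > r) ok = false;
        }
        if(!ok) puts("NO");
        else{
            puts("YES");
            for(long long i = 1; i <= n; ++i)
                printf("%lld ", ans[i]);
            puts("");
        }
    }
    return 0;
}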
You are so great! It helped me to understand the whole process.
Happy to help :)
https://codeforces.net/contest/1708/submission/164510862 Why is this submission of mine working for problem D? I am not sure if it will pass the constraints.
Today's C was quite hard !
Finally I'm Pupil! The round was cool, I really liked problem C. In my opinion the idea of that problem was similar to This problem
wait Difference Array is brute force? :*)
Anyone else failing test case 5509 on problem C? I would love to know what that case is
1
6 2
6 1 1 1 1 2
Your program prints 011111 instead of 111111.
Thank you so much!
How did you get the testcase? I was making the same mistake, and sometimes it becomes difficult to think of the testcase where the code is failing.
Method 1:
Here's the logic of the wrong solution:
Negate the incorrect assumption: now we have to find a case where it's optimal to lower iq despite n-i > q && there being an increase in "bads". So to break this, let n=3 and q=2. Now you can see that for i=0 the first part of our new condition is true. Because we want to have an increase in "bads", let a[0] be greater than q, so for example let a[0]=3. Let Doremy test this contest, so her iq drops to one. Now we have to prepare a[1] and a[2]. Because we want an increase in "bads" after the first contest, at least one of those values (a[1] or a[2]) has to be greater than the current iq (1) and less than or equal to the previous iq (2). But if we make a[1]=2, unfortunately we won't get a good testcase. So let a[2]=2. After the last contest Doremy's iq will drop to zero (so it's optimal). Now we don't want a[1]'s value to bother us, so make it "easy" (for example 1). So our testcase is:
1
3 2
3 1 2.
Method 2 (simpler):
His code failed on testcase 5477, so I've copied his code and replaced
with
Then you just have to submit modified code and read the checker log.
wrong answer Token parameter [name=answer string] equals to "6_2_1_1_1_1_3_2_", doesn't correspond to pattern "[01]{6}" (test case 5477)
thank u sir this testcase helped me to get ac
For those who solved problem C, can you guys explain how you came up with the required observations during the contest?
It was clear that after we reach the last index it is useless to have q more than 1. So I thought of binary search, because I was thinking there would be a certain index after which we have to test every contest, but after thinking a bit I was not finding any monotonic behavior.
So I thought of a simple greedy approach: we set q at index n to 1 and start traversing backwards; if the current element is >= q then we increase q, otherwise we keep q the same, and when q becomes equal to the original IQ we stop. Now all the places after this index will be 1, and before this index the values which are less than the IQ will be 1.
Solution Link
Just brute force from all the possible variants) This problem was unexpectedly hard for me.
Same here! I was not able to find a proper greedy solution during the contest.
I think I spent like, 20 minutes trying different ideas, but it all ultimately boiled down to "she can do at most q hard contests, which ones should she do?"
The complication was that doing a hard contest early can cause some later contests to become hard (that otherwise wouldn't have been hard); this initially seemed really annoying at first, but then I realized that it simply means that doing a hard contest later is better than doing a hard contest earlier.
This led to the result that the q hard contests for her to do should ideally be the last q hard contests that she encounters. In other words, once she loses her first point of IQ, she should not skip any more contests after this point. Once I mentally proved this (greedy exchange argument that skipping a hard contest B from after losing the first point of IQ cannot be better than skipping an earlier hard contest A instead), the biggest hurdle was overcome.
There were still further issues (like how this can lead to solutions where she does fewer than q hard contests, but I had to assure myself that they're still optimal), since I didn't realize I could binary search and tried to process the vector in reverse, but I think at this point, a correct implementation should eventually fall in place regardless.
My submission, FWIW: https://codeforces.net/contest/1708/submission/164517038
(I kept imagining her derpy LoLK face as she makes her decisions and struggles on hard contests, but I'm not sure if this helped me realize the critical observations or distracted me from discovering them earlier)
Spent the whole time optimizing my indexed_multiset in C, only to realise just after the contest that binary searching the answer is sufficient. :( Nice bugaboos btw!
I also spent the whole time for just finding out how to use the indexed_multiset, after which I was not even able to test it locally :(
In case someone is interested, solution 1 of Div 1A can be done in linear complexity too. Let dp[i] be the minimum value of q such that we can test all contests from i..n-1. Then dp[n-1] = 1 and dp[i] = (a[i] <= dp[i+1]) ? dp[i+1] : dp[i+1]+1. Now we can iterate over the values of i: we take all tests with a[j] <= q for indices j <= i-1 (this count can be maintained using prefix sums) and then add the n-i contests from i to n-1, which is allowed whenever dp[i] <= q; the value of i resulting in the largest sum will be optimal.
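For reference, a rough C++ sketch of how this linear approach can look (my own code with 1-based indices; the prefix-sum array, the variable names and the reconstruction at the end are my additions, not necessarily how the commenter implemented it):
#include <bits/stdc++.h>
using namespace std;
// sketch only, under the interpretation described in the comment above
int main(){
    int T;
    scanf("%d", &T);
    while(T--){
        int n, q;
        scanf("%d %d", &n, &q);
        vector<int> a(n + 1);
        for(int i = 1; i <= n; ++i) scanf("%d", &a[i]);
        // dp[i] = minimum IQ needed at contest i to test every contest i..n
        vector<int> dp(n + 2, 0);
        dp[n] = 1;
        for(int i = n - 1; i >= 1; --i)
            dp[i] = dp[i + 1] + (a[i] > dp[i + 1]);
        // easy[i] = number of contests among 1..i with a[j] <= q
        vector<int> easy(n + 1, 0);
        for(int i = 1; i <= n; ++i)
            easy[i] = easy[i - 1] + (a[i] <= q);
        // choose the split point: before it only easy contests, after it everything
        int best = n + 1, bestVal = easy[n];
        for(int i = 1; i <= n; ++i)
            if(dp[i] <= q && easy[i - 1] + (n - i + 1) > bestVal){
                bestVal = easy[i - 1] + (n - i + 1);
                best = i;
            }
        for(int i = 1; i <= n; ++i)
            putchar(i >= best || a[i] <= q ? '1' : '0');
        putchar('\n');
    }
    return 0;
}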
I always face problems recovering the actual choice from the dp array; can you please explain that part too?
And if possible, please share some problems which require this technique.
Thanks in advance for your help.
Can someone explain C?
Let's say there are $$$k$$$ elements such that $$$a_i \le q$$$. Now suppose the answer is $$$x$$$; therefore $$$x \ge k$$$. So there are $$$e = x - k$$$ extra elements that we need. The optimal way of choosing those $$$e$$$ elements is to take them from the end of the array. So if we want to check whether it is possible to make $$$answer = x$$$, we can just simulate the process and see if it is possible to choose those extra $$$e$$$ elements from the end of the array. Knowing this, we can apply binary search to find the best answer.
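A hedged C++ sketch of this check plus the binary search (my own code, not the commenter's; feasible(x) builds the set described above, i.e. every contest with a_i <= q plus the last x - k contests with a_i > q, and simulates it):
#include <bits/stdc++.h>
using namespace std;
// sketch only: names and structure are mine, not taken from the comment
int main(){
    int T;
    scanf("%d", &T);
    while(T--){
        int n, q;
        scanf("%d %d", &n, &q);
        vector<int> a(n);
        for(int &x : a) scanf("%d", &x);
        int k = 0;                              // contests that are easy for the initial IQ
        for(int x : a) k += (x <= q);
        vector<int> hardPos;                    // positions with a_i > q, in order
        for(int i = 0; i < n; ++i) if(a[i] > q) hardPos.push_back(i);
        auto buildSet = [&](int e){             // all easy contests + the last e hard ones
            vector<char> take(n, 0);
            for(int i = 0; i < n; ++i) take[i] = (a[i] <= q);
            for(int j = (int)hardPos.size() - e; j < (int)hardPos.size(); ++j)
                take[hardPos[j]] = 1;
            return take;
        };
        auto feasible = [&](int x) -> bool {
            int e = x - k;
            if(e < 0) return true;
            if(e > (int)hardPos.size()) return false;
            vector<char> take = buildSet(e);
            int iq = q;
            for(int i = 0; i < n; ++i){
                if(!take[i]) continue;
                if(iq == 0) return false;       // cannot test a contest with zero IQ
                if(a[i] > iq) --iq;
            }
            return true;
        };
        int lo = k, hi = n, best = k;           // feasible(k) always holds
        while(lo <= hi){
            int mid = (lo + hi) / 2;
            if(feasible(mid)) best = mid, lo = mid + 1;
            else hi = mid - 1;
        }
        vector<char> take = buildSet(best - k);
        for(int i = 0; i < n; ++i) putchar(take[i] ? '1' : '0');
        putchar('\n');
    }
    return 0;
}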
I would fail a phone screen very easily if the question was problem B
This was the worst contest i've ever had
This is the perfect example of editorial and solution to ruin a beginner's mind and crush their confidence ;-;
Can someone explain how to derive complexity in 1707B - Difference Array:
I understand the first two sentences, but I still struggle to understand why the complexity is $$$A \log A$$$
Glad I'm not the only one who doesn't understand that. It should be explained in the editorial.
I assume this complexity is simply incorrect. Or correct in a strict definition of big-o notation, not the way we informally use it.
Because even the author's solution from this editorial successfully passes the CodeChef tests (the same problem), where A is limited by 10^18
True.
I think the correct complexity would be $$$O(n \log a_n)$$$, as described in CodeChef's solution
Did anyone solve div2 C using indexed set? If yes, can you please share the code.
What the heck is an indexed set?
For C I used this logic: traversing forward (i = 1 to n), if a[i] <= q then appear in the contest; but if not, then check if any occurrence of a[j] = q exists with j > i. If no such occurrence exists, then appear in the contest and q--, otherwise don't appear and skip the contest.
(But if n - i <= q, appear in the contest regardless.) Here is my submission: https://codeforces.net/contest/1708/submission/164519432 . Can anybody help me? It was not accepted and I can't find the error.
For example, if you consider a case like 5 5 5 5 5 5 5 5 5 5 5 5 5 2 1 1 1 1 1 and q = 2, you will miss out on more 5s than by taking the 2.
Unfortunately, a harder version of this problem has appeared in a codechef contest here.
SAME PROBLEM
At least look at the constraints before commenting.
Bro I submitted the same solution to both of these problems codechef codeforces
Funnily, even the solution from the Codeforces editorial successfully passes the CodeChef tests after a couple of algo-unrelated fixes.
Which means that the asymptotic analysis here was crap; no wonder no one understands it.
1707C - DFS Trees If findMST(x) creates an MST, there is no cross edge in the graph.
Is there any explanation or proof about it?
Well, an MST is unique iff no non-MST edge of equal weight can substitute an MST edge (we call an edge included in the MST an MST edge) such that the result is still an MST. (You can find a Chinese version here.)
The problem requires the weight of every edge to be different from each other, so the MST must be unique.
So we've got the unique MST and the corresponding MST edges. If we set a root rt in the MST and some non-MST edge is a cross edge (suppose its two endpoints are u and v, and neither u is in the subtree of v nor v in the subtree of u), the MST can't be a DFS tree starting from rt, since a cross edge is not allowed to appear in a DFS tree of an undirected graph. If there is no cross edge, every non-MST edge is bigger than any tree edge between u and v, so it can never be taken, since we visit the smallest edge starting from the current vertex first. Additionally, after proving the theorem above, we can find that a root is disqualified iff there exists one or more non-MST edges $$$(u,v)$$$ such that the MST path between $$$u$$$ and $$$v$$$ ($$$u,v$$$ excluded) includes it (the root).
Thus, for every vertex we can calculate how many non-MST edges disqualify it, costing $$$O(nm)$$$ time in total.
We can use rerooting DP to optimize it. Firstly, calculate the number of cross edges for the case where the root is 1, and then start a DFS. When we move out of a path contributing to the answer, or into one, we update the answer.
UPD: I've found that the previous algorithm has an incorrect time complexity, and it got hacked with a graph containing a vertex of high degree, due to some bad implementation in the rerooting DP.
Sorry for misleading.
I fixed the bug by preprocessing, for each vertex, toward which child it goes to enter a path; however, this introduces the binary lifting algorithm on the tree (without the LCA-finding part) to find the $$$k$$$-th ancestor of a vertex, bringing the running time to $$$O(m\log n)$$$ (excluding finding the MST), similar to the official solution.
Code: 164904402
Hope someone can come up with a solution to 1708E - DFS Trees that runs in $$$O(m)$$$ time in the rerooting process and is still an "online" algorithm; i.e., it gets the answers of most vertices during the DFS, instead of using techniques like differencing and accumulating on the tree.
Got it, thank you very much!
For some reason, I made the stupid mistake of assuming that all the $$$a_i$$$ need to be distinct for each i in problem B. I deserve getting demoted for that lol.
Yeah same! I eventually fixed it but glad to know I am not the only one
Codeforces editorials be like:
It is easy to see that apsodnvapowrbnapwoiv eo + awevoi ^ 23 + 34 = x/2. From this, obviously x + 239 vonwef oij 238 + ovw= 23t239ug 3ig --> 2039jjoi. Finally, we see that oawijpoi24n = 23gin 3p23 + 3gi2bj 9 )g3j.
Now we can easily solve the probem in O(log n + m^xy + q)
typical Chinese editorial
Can anyone please tell me where my code fails for the third problem? I first marked all the numbers less than k as 1 and then proceeded with the greater numbers.
https://codeforces.net/contest/1708/submission/164523613
Take a look at Ticket 15814 from CF Stress for a counter example.
Can someone give me a counter test for my submission for problem C? Submission
Alternative solution for Div1C:
Each of the edges not in MST gives us a restriction on which vertices can be good. We can view those restrictions as values, i.e. for each edge we can either add 1 to all good vertices or subtract 1 from all bad vertices. In the end vertices with values equal to the total number of additions are good.
The trick is that we actually don't need to update all good or all bad vertices for every edge, but just 2.
Pick any vertex $$$s$$$ and run dfs on the MST starting from $$$s$$$.
Now suppose that we are at some vertex $$$v$$$ and there is an edge $$$(v, w)$$$ in the original graph, but not in the MST. We can also assume that we have already visited $$$w$$$, since edges are bidirectional.
There are 2 cases:
$$$w$$$ is not an ancestor of $$$v$$$: then the only possible good vertices lie in the subtrees of $$$v$$$ and $$$w$$$, so we can increment the values of $$$v$$$ and $$$w$$$ by $$$1$$$
$$$w$$$ is an ancestor of $$$v$$$: let $$$p$$$ be an ancestor of $$$v$$$, such that $$$w$$$ is the parent of $$$p$$$, then all vertices that are descendants of $$$p$$$, but not descendants of $$$v$$$ are bad, so we can decrement value of $$$p$$$ by $$$1$$$ and increment value of $$$v$$$ by $$$1$$$
After that the values of each vertex $$$v$$$ is sum of the values of vertices on the path between $$$v$$$ and root $$$s$$$, so we can just run another dfs from $$$s$$$ to calculate the answer.
164545959
Thanks very much; the editorial's code is similar to your statement, but your statement is much clearer, more precise, and more complete than the editorial's statement.
some additions:
For case 1, why are all other vertices bad? Because when they call findMST and reach v first, they will always reach w by adding (v,w), which is not in the MST; reaching w first is symmetric.
Case 2 has a similar reason.
Do you have a bound for the number of operations in Div1E? Some submissions are assuming $$$O(n\log n)$$$ moves.
Also, the "brute-force" solution for div1B works for the harder version ($$$a_i$$$ up to $$$10^{18}$$$) too.
Theorem (TL;DR). The "brute-force" solution (handling zeros separately) has $$$O(n \log C)$$$ time complexity.
Statement. If we have $$$k$$$ non-zeros after $$$d$$$ iterations, then the initial sequence had an element which is at least $$$\left(\frac{k - 1}{d}\right)^d$$$.
Proof. Suppose we have at least $$$k$$$ non-zeros after $$$d$$$ iterations. Then one iteration earlier we had an element greater than or equal to $$$k$$$; moreover, the lexicographically smallest possible sequence was $$$0, 1, 2, \ldots, k$$$. Two iterations earlier the lexicographically smallest sequence was $$$0, 0, 1, 3, 6, 10, \ldots, C_{k}^2$$$. By induction, we can prove that $$$d$$$ iterations earlier (at the beginning), the lexicographically smallest possible sequence was $$$a_i = C_{i - 1}^d$$$, and thus $$$a_n \geqslant C_{k - 1}^d \geqslant \left( \frac{k - 1}{d} \right)^d$$$.
UPD. Optimal sequence was not $$$a_i = C_{i - 1}^d + \ldots + C_{i - 1}^0$$$, but just $$$a_i = C_{i - 1}^d$$$, but theorem still holds.
Picking $$$k = 2d + 1, d = \log_2 C + 1$$$ yields $$$a_n > C$$$. So after $$$\log C + 1$$$ iterations($$$O(n \log n)$$$ each) there will be at most $$$2\log C + 3$$$ non-zeros and thus total complexity is $$$O(n \log n \log C)$$$.
But picking $$$k = \sqrt[3]{n}d + 1, d = \log_{\sqrt[3]{n}}C = \frac{3\log C}{\log n}$$$ also yields $$$a_n > C$$$. After $$$O\left(\frac{\log C}{\log n}\right)$$$ iterations we will have $$$O\left(\sqrt[3]{n} \frac{\log C}{\log n}\right)$$$ non-zeros. It is still too small, so bound $$$O(k^2 \log k)$$$ for the rest part of the algorithm is still good($$$O(n)$$$). And complexity of first $$$d$$$ iterations is $$$O\left(n \log n \frac{\log C}{\log n}\right) = O(n \log C)$$$. Thus, we have upgraded our bound to $$$O(n \log C)$$$ which is cool, actually.
Just for illustration, here is how the optimal sequences look:
$$$0, 0, 0, 0, 1, 5, 15, 35, 70$$$
$$$0, 0, 0, 1, 4, 10, 20, 35$$$
$$$0, 0, 1, 3, 6, 10, 15$$$
$$$0, 1, 2, 3, 4, 5$$$
$$$1, 1, 1, 1, 1$$$
Yeah, I think my brute force is $$$O(n\log n\log V)$$$, too, where $$$n\log n$$$ is the sorting complexity.
However, my submission TLed on pretest 3.
Can you tell me why?
Ah, I see. I didn't ignore the 0's.
That was kind of interesting.
Could you explain the transition from "one iteration before" to "two iterations before". I.e. where do binomials come from?
I've made a mistake: the sequence is not a sum of binomials, but just binomials.
But the method is the following: we are trying to "invert" an iteration. How'd we do it? $$$a_1, \ldots a_n \mapsto 0, a_1, a_1 + a_2, \ldots, (a_1 + \ldots + a_n)$$$
$$$1, 1, 1, 1, 1 \to 0, 1, 2, 3, 4, 5$$$
$$$0, 1, 2, 3, 4, 5, \to 0, 0, 1, 3, 6, 10, 15$$$
Obviously, we will get the correct initial sequence, but why is it optimal?
Statement. Let $$$f$$$ be our transform and $$$\Delta$$$ the function mapping a sequence to its sorted adjacent differences. Then for all $$$B$$$ satisfying $$$\Delta(B) = A$$$, $$$B_i \geqslant f(A)_i$$$.
Proof. Note, that we can assume, that any inversion $$$B$$$ has form $$$0, a_{i_1}, a_{i_1} + a_{i_2}, \ldots, (a_{i_1} + \ldots + a_{i_n})$$$, where $$$i_1, \ldots, i_n$$$ is some permutation of numbers $$$1, \ldots, n$$$. $$$a_{i_1} + \ldots + a_{i_k} \geqslant a_1 + \ldots + a_k$$$.
Statement. If $$$A_i \leqslant B_i$$$, then $$$f(A)_i \leqslant f(B)_i$$$.
So $$$f(f(\ldots f(A)\ldots))$$$ gives us the optimal sequence (any other has each element greater than or equal to ours), and thus the one with the least possible max element.
Very neat proof. I made a video solution on Problem D illustrating this proof.
thank you
Video Solution for Problem C and Problem D
To me, 1A is harder than 1BCD ... High quality round!
I wish editorials could explain more about how to come up with such a solution instead of only proving why the solution works.
In question C, why is my answer for the following test case wrong: n = 5, q = 2, a = [5, 1, 2, 4, 3]? My answer is 11101; why is this incorrect? Please explain.
For the test case n = 5, q = 2, a = [5, 1, 2, 4, 3], your answer 11101 is actually invalid. It seems you missed the fact that after testing a round with $$$a_i > q$$$, q will decrease by one, so for the following rounds you will have less q. Going by this logic, you will have q = 0 by the time you reach the 4th round, and you won't be able to test the 4th and 5th rounds.
even not able to solve C /kk
For solution 2 of 1707A:
Why? Let's say $$$a_i = Q+1$$$. Now if we increase the IQ by $$$1$$$ at this point, it becomes $$$Q+1$$$, which is equal to $$$a_i$$$. The former was in reverse order, but in the correct (forward) order what we are creating is: after testing a contest with difficulty $$$Q+1$$$, Doremy's IQ dropped from $$$Q+1$$$ to $$$Q$$$, and that's incorrect.
In that case, you can actually increase the initial q value (from 0 to 1, 2, 3 and so on) until this kind of case does not happen. If you do this, you will get the "real q", which, however, is the same as the q calculated by the editorial's algorithm.
If your IQ still doesn't reach the upper bound after testing all rounds, you can also increase the initial q value. Also, this operation will not influence the contests you are able to test.
Sorry that I didn't mention this in the editorial. My English wording is bad.
E wa 7???
How can I come up with the idea 'If findMST(x) creates an MST, there is no cross edge in the graph. So if you can determine whether there is a cross edge starting DFS from every node, the problem is solved.' in div1C?
I knew the MST is fixed, and tried to find for which vertices the rooted DFS tree includes edges not in the MST, but I tried tree dp and DFS to figure it out for every vertex and failed to reach that idea.
Since the MST is fixed, we know which edges are bad edges (not in the MST).
Now given a bad edge, can we figure out the vertices $$$r$$$ such that $$$\mathrm{findMST}(r)$$$ will pick this edge?
Observe that $$$\mathrm{findMST}(r)$$$ is like a regular DFS, but with some extra logic to choose the order of exploring children. So for a moment, think of it to be a normal DFS.
Since DFS explores all edges of a child before returning to its parent, if there is a bad edge from a vertex $$$u$$$ currently being explored by the DFS algorithm, to an unvisited vertex $$$v$$$ in a different subtree that is not its ancestor, the DFS algorithm will pick this edge and explore $$$v$$$ (yielding an incorrect MST). This is nothing but a cross-edge.
So the problem boils down to finding for which vertices $$$r$$$, none of the bad edges are cross-edges in $$$\mathrm{findMST}(r)$$$.
Typo in Div. 1 D?
$$$S_{x,i}=\sum_{j\le i} dp_{x,i}$$$ should be
$$$S_{x,i}=\sum_{j\le i} dp_{x,j}$$$
Can someone explain why edge (u, v) is not a cross edge when we DFS from t? But isn't v a descendant of u in the DFS traversal?
Anyone else failing test case 2161 on problem B?
My submission :- https://codeforces.net/contest/1708/submission/164581294
Now Accepted!
Can someone explain problem D clearly? The editorial is not clear enough about the brute force approach.
1D is quite interesting for me.
Sorry for the late reply; can you explain this dp?
In Div2 E (or Div1 C), I found the cycle and the maximum-weight edge on the cycle. Suppose the max-weight edge is between u and v; I traversed all the nodes in the cycle other than u and v, then marked all the nodes in those subtrees as '0' in the final answer string. I am getting TLE on the 7th test case. Can anyone help?
164528852
I was able to find a test case where Isonan's submission for Div1E: Replace fails.
Submission: 164525375
Ticket 15813
Can someone hack it for me? Or let me know if I constructed an invalid test case? Thanks!
You were right.
I didn't consider some corner cases...
Div 2 problem D: can somebody prove how it only requires $$$O(\log n)$$$ operations for the array to consist of at most 1 non-zero element?
Hints for Div2C?
A was tougher than B... Amazing!!!
Does anyone know why my solution might not be passing the 3rd test of 1707B?
Wondering if anyone had the same problem.
If anyone wants to check my code: 164618343
Take a look at Ticket 15838 from CF Stress for a counter example.
I misclicked the problem, so i just wasted my request :(
HaHa, no worries. I've reset it. You can try again.
Can someone post a solution for 1B? It feels like the person who wrote the editorial didn't even solve the problem.
Here's mine: https://codeforces.net/problemset/submission/1707/164635699
Basically, you keep track of where the first non-zero element is, and then perform the operations exactly as specified, except you start at the first non-zero element, i.e., start calculating differences and perform the sorting on the non-zero range (since the difference between 0s will be 0s and will remain at the left side of the array after sorting).
The actual challenge to this problem is realizing that this will not exceed the time limit. It may seem impractical, since there are 100000 elements, each as large as 500000. But if you consider a single operation, the largest element before the operation is an upper bound on the sum of ALL elements after the operation, e.g., for n = 4, $$$(a_2 - a_1) + (a_3 - a_2) + (a_4 - a_3) = a_4 - a_1$$$. Basically, the values drop down really fast, and you get a lot of 0s quickly, making it practical to actually perform every operation on the non-zero elements directly.
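For reference, a self-contained C++ sketch of this approach (my own rewrite, not the linked submission; it stores only the non-zero part plus a counter of zeros, which is equivalent to starting each operation at the first non-zero element):
#include <bits/stdc++.h>
using namespace std;
// sketch only: the structure is mine, the idea is the one described above
int main(){
    int t;
    scanf("%d", &t);
    while(t--){
        int n;
        scanf("%d", &n);
        vector<long long> b;        // non-zero elements, kept sorted
        long long zeros = 0;        // number of zeros in the current array
        for(int i = 0; i < n; ++i){
            long long x;
            scanf("%lld", &x);
            if(x) b.push_back(x); else ++zeros;
        }
        sort(b.begin(), b.end());
        while(zeros + (long long)b.size() > 1){
            vector<long long> nb;
            if(zeros > 0){
                // differences inside the zero block stay zero; the last zero
                // paired with the smallest non-zero element gives that element
                --zeros;
                if(!b.empty()) nb.push_back(b[0]);
            }
            for(size_t i = 0; i + 1 < b.size(); ++i)
                nb.push_back(b[i + 1] - b[i]);
            b.clear();
            for(long long x : nb){
                if(x) b.push_back(x); else ++zeros;
            }
            sort(b.begin(), b.end());
        }
        printf("%lld\n", b.empty() ? 0LL : b[0]);
    }
    return 0;
}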
After excluding the zeros, let's say we have only non-zero elements $$$a_1, a_2, a_3, \ldots, a_n$$$.
Let $$$S = a_1 + a_2 + \ldots + a_n$$$.
Now when we do one iteration, our array becomes $$$a_2-a_1, a_3-a_2, \ldots, a_n - a_{n-1}$$$,
so the new sum is $$$a_n - a_1 = S - (a_1 + a_2 + \ldots + a_{n-1})$$$.
$$$S$$$ decreases the least when $$$a_1 + a_2 + \ldots + a_{n-1} = n-1$$$, as all these elements are $$$> 0$$$.
So in each step $$$S$$$ decreases by at least $$$n-1$$$.
First it decreases by $$$n-1$$$, then $$$n-2$$$, ... (in the worst case for us), and it gets to 0 fast.
So brute force works.
You can refer to this: https://discuss.codechef.com/t/array-ops-editorial/100450/5
All great, except for the fact your array ended up with [2,1,0,0,0] in reality, and $$$A_2$$$ is now in eternal misery, being unable to change him/herself to $$$0$$$.
1E is really brilliant!
May I ask what is bin-up on tree?
Binary lifting
Any List of Problems similar to Div2C?
Found this explanation for 1707B - Difference Array to be much more intuitive and easy to understand : https://discuss.codechef.com/t/array-ops-editorial/100450/6
Why does the solution of C work?
In the editorial for 1D, dp is defined as below:
I am unable to understand what we mean by "operation" here.
Can someone please help with what one operation means?
Update:
Figured it out. It is the same operation as in the problem statement.
It means none of the vertices from the subtree of $$$x$$$ is present in the partial virtual tree (i.e., the set $$$U$$$ in the problem statement).
For problem C, where is it failing? 202440451
The solution of div1b is based on the defense