//////////////////////////////////////////////////////////////////////////////// // STATA FOR Sasaki, Y. & Ura, T. (2022): Estimation and Inference for // Moments of Ratios with Robustness against Large Trimming Bias. Econometric // Theory, 38 (1), pp. 66-112. // // Use it when you want to estimate the average treatment effect (ATE) robustly // against limited overlaps. //////////////////////////////////////////////////////////////////////////////// program define robustate, eclass version 14.2 syntax varlist(numeric min=3) [if] [in] [, probit h(real 0.1) k(real 4)] marksample touse gettoken depvar indepvars : varlist _fv_check_depvar `depvar' fvexpand `indepvars' local cnames `r(varlist)' tempname b V N cb local prob = 1 if "`probit'" == "" { local prob = 0 } if( `prob'== 0 ){ mata: estimate("`depvar'", "`cnames'", "`touse'", `h', `k', /// "`b'", "`V'", "`N'") } else{ mata: estimate_probit("`depvar'", "`cnames'", "`touse'", `h', `k', /// "`b'", "`V'", "`N'") } matrix colnames `b' = naiveATE robustATE matrix colnames `V' = naiveATE robustATE matrix rownames `V' = naiveATE robustATE ereturn post `b' `V', esample(`touse') buildfvinfo ereturn scalar N = `N' ereturn scalar h = `h' ereturn scalar k = `k' ereturn local cmd "robustate" if( `prob'==0 ){ ereturn local pscore "logit" } if( `prob'==1 ){ ereturn local pscore "probit" } ereturn display di "* robustATE is based on Sasaki, Y., and T. Ura (2022) Estimation and Inference" di "for Moments of Ratios with Robustness against Large Trimming Bias. Econometric" di "Theory, 38(1), pp. 66–112." end mata: //////////////////////////////////////////////////////////////////////////////// // FUNCTION FOR LOGIT ESTIMATION void logitc(todo, para, dw, crit, g, H){ n = rows(dw) dimdw = cols(dw) d = dw[.,1] w = dw[.,2..dimdw] p = cols(w) pr = 1 :/ (1 :+ exp(-1:*((J(n,1,1),w)*(para')))) crit = mean( (1:-d):*log(1:-pr:+0.001) :+ d:*log(pr:+0.001) ) } //////////////////////////////////////////////////////////////////////////////// // FUNCTION FOR PROBIT ESTIMATION void probitc(todo, para, dw, crit, g, H){ n = rows(dw) dimdw = cols(dw) d = dw[.,1] w = dw[.,2..dimdw] p = cols(w) pr = normal((J(n,1,1),w)*(para')) crit = mean( (1:-d):*log(1:-pr:+0.001) :+ d:*log(pr:+0.001) ) } void probitobj(para,dw,obj){ n = rows(dw) dimdw = cols(dw) d = dw[.,1] w = dw[.,2..dimdw] p = cols(w) pr = normal((J(n,1,1),w)*(para')) obj = mean( (1:-d):*log(1:-pr:+0.001) :+ d:*log(pr:+0.001) ) } void probitgrad(para,dw,grad){ dimpara = cols(para) grad = J(dimpara,1,0) real scalar obj, obj_DELTA probitobj(para,dw,obj) for( idx = 1 ; idx <= dimpara ; idx++ ){ para_DELTA = para para_DELTA[idx] = para_DELTA[idx] + 0.000001 probitobj(para_DELTA,dw,obj_DELTA) grad[idx,1] = (obj_DELTA - obj) / 0.000001 } } void probithessian(para,dw,hessian){ dimpara = cols(para) hessian = J(dimpara,dimpara,0) real vector grad, grad_DELTA probitgrad(para,dw,grad) for( jdx = 1 ; jdx <= dimpara ; jdx++ ){ para_DELTA = para para_DELTA[jdx] = para_DELTA[jdx] + 0.000001 probitgrad(para_DELTA,dw,grad_DELTA) hessian[.,jdx] = (grad_DELTA :- grad) :/ 0.000001 } } void probitindgrad(para,dw,indgrad){ n = rows(dw) dimpara = cols(para) indgrad = J(dimpara,n,0) for( idx = 1 ; idx <= n ; idx++ ){ real vector grad probitgrad(para,dw[idx,.],grad) indgrad[,idx] = grad } } //////////////////////////////////////////////////////////////////////////////// // FUNCTION FOR SMOOTH TRUNCATION void S(u, sout){ u = 2 :* ( u :- 0.5 ) //sout = 3:*u:^4:-8:*u:^3:+6:*u:^2 sout = 6:*u:^5:-15:*u:^4:+10:*u:^3 sout = (u :<= 0) :* 0 :+ (0 :< u :& u :< 1) :* sout :+ (1 :<= u) :* 1 } //////////////////////////////////////////////////////////////////////////////// // FUNCTIONS FOR SHIFTED LEGENDRE void choose(n,k,cout){ cout = factorial(n):/factorial(n:-k):/factorial(k) } void shifted_Legendre01(n, x, lout){ real vector choose1, choose2 choose(n,0..n,choose1) choose(n:+(0..n),(0..n),choose2) lout = (-1)^n*sum(choose1 :* choose2 :* ((-x):^(0..n))) :* (0