Description
If the penalized synthetic control (PSC) code throws dimensional-conformability errors for a SINGLE treated unit, you might want to try the attached code below. I made only minor changes to "GeithnerConnections_CLUSTER.R," which Jérémy L'Hour originally posted.
@JayERyu
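For anyone hitting the same errors, the root cause is that R drops dimensions when a matrix is subset down to a single column: with one treated unit, y[PreTreatPeriod, d==1] comes back as a plain vector, and %*% no longer conforms. A standalone toy illustration (y_toy and d_toy are made up for this note, not objects from the script):

y_toy = matrix(rnorm(20), nrow=5)    # 5 periods x 4 units
d_toy = c(TRUE, FALSE, FALSE, FALSE) # exactly one "treated" unit
dim(y_toy[, d_toy==0])               # 5 x 3: still a matrix
dim(y_toy[, d_toy==1])               # NULL: dropped to a plain vector
dim(data.matrix(y_toy[, d_toy==1]))  # 5 x 1: data.matrix() restores the column, as used below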
### EXAMPLE 4: Geithner connections
### Jeremy L Hour
### 11 April 2017
### EDITED: 10/8/2018
### PARALLELIZED VERSION
# Jay Ryu ([email protected]) 5/20/2021. The code below slightly modifies the original code for "ONE" TREATED UNIT,
# to address a potential violation of dimensional conformability. NO adjustment was made for replicating
# Acemoglu et al. (2016); disregard the multiple error messages unless you want to replicate Acemoglu et al.
# The same goes for the tables.
# [JR] marks the slight modifications in the relevant places.
setwd("//ulysse/users/JL.HOUR/1A_These/A. Research/RegSynthProject/regsynth")
rm(list=ls())
# Load packages
library("MASS")
library("ggplot2")
library("gtable")
library("grid")
library("reshape2")
library("LowRankQP")
library("R.matlab")
library("stargazer")
library("foreach")
library("doParallel")
# Load user functions
source("functions/wsoll1.R")
source("functions/regsynth.R")
source("functions/regsynthpath.R")
source("functions/TZero.R")
source("functions/synthObj.R")
source("functions/perm.test.R") #[JR] For one treated unit, refer to function codes around the end of this file.
source("functions/conf.interval.Geithner.R")
source("functions/bias.R")
# 0. Loading data
data = readMat("//ulysse/users/JL.HOUR/1A_These/A. Research/RegSynthProject/regsynth/data/GeithnerConnexions/Matlab Files/Data.mat")
# 1. Data Cleaning and Setting
ticker = data$ticker # firms tickers
# Collect names and tickers
FirmID = data.frame()
for(i in 1:603){
if(length(unlist(ticker[i])) > 0 & length(unlist(ticker[603+i])) > 0){
FirmID[i,"Ticker"] = unlist(ticker[i])
FirmID[i,"Name"] = unlist(ticker[603+i])
} else {
FirmID[i,"Ticker"] = "Unknown"
FirmID[i,"Name"] = "Unknown"
}
}
X = data.frame(data$num) # firms characteristics
names(X) = c(unlist(data$VarNames)) # setting variable names
row.names(X) = FirmID[,"Ticker"] # setting firms name
ind = is.na(X[,8]) | is.na(X[,9]) | is.na(X[,10]) # eliminating firms with no data for 'ta2008_log','roe2008','tdtc2008'
X = X[!ind,]
y = as.matrix(data$Re) # Returns
y = y[,!ind]
y[is.na(y)] = 0 # replacing missing returns with zero
# Identification of the event
ConnMeasure = 3 # 1: Shared Board 2: NY Connection 3: Geithner Schedule 4: Geithner Schedule 2007, position in data frame
GeiNomDate = 355 # Geithner nomination date
EventDate = GeiNomDate-1
PreTreatPeriod = (GeiNomDate-281):(GeiNomDate-32) # Window of 250 days ending 30 days prior to Geithner nomination
FalsifTest = c(340:353,357:389,395:447) # Window for falsification test
# Correlation with Citi and BoA over the pre-treatment period
# We want to exclude the effect of the CitiGroup bailout
# No need to exclude BoA (tax problem after nomination, check page 29)
Citi = which(X[,5]==140) # Citi Group
corrCiti = cor(y[PreTreatPeriod,Citi], y[PreTreatPeriod,])
# Compute Q10 for correlation distributions
corrCitiTr = sort(corrCiti,decreasing=T)[58]
X = X[corrCiti<corrCitiTr,]
y = y[,corrCiti<corrCitiTr]
# Treatment variable
# [JR] original, for multiple treated units: d = X[,ConnMeasure] > 0 # one or more meetings in 2007-08
# Creating only one treated unit:
d = X[,ConnMeasure] >=13 ## [JR] just for one treated unit case
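# [added check] the >= 13 cutoff is assumed to isolate exactly one treated firm:
stopifnot(sum(d) == 1)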
# Who are the treated?
print("Treated firms and number of meetings in 2007-08")
cbind(FirmID[match(rownames(X[d==1,]), FirmID[,1]),2], X[d==1,ConnMeasure])
# Control variables other than pre-treatment outcomes; unused for now
# Included:
# - ta2008_log : firm size
# - roe2008 : profitability
# - tdtc2008 : leverage
Z = cbind(X[,c(8,9,10)], X[,c(8,9,10)]^2, X[,c(8,9,10)]^3)
# 2. Some Descriptive Statistics (TO BE CONTINUED?)
ConnReturns = ts(apply(y[PreTreatPeriod,d==1],1,mean),start=c(1), freq=365) #[JR] may not work for one treated unit
NConnReturns = ts(apply(y[PreTreatPeriod,d==0],1,mean),start=c(1), freq=365)
# Balance check
# Treated
apply(X[d==1,c(8,9,10)],2,summary)
# Control
apply(X[d==0,c(8,9,10)],2,summary)
# 3. CV for selecting the optimal lambda
# [JR] original: X0 = y[PreTreatPeriod,d==0]; X1 = y[PreTreatPeriod,d==1]
# [JR] original: Y0 = y[GeiNomDate+1,d==0]; Y1 = y[GeiNomDate+1,d==1]
# [JR] data.matrix() added for a single treated unit; the following also works for multiple treated units.
X0 = data.matrix(y[PreTreatPeriod,d==0])
X1 = data.matrix(y[PreTreatPeriod,d==1])
Y0 = data.matrix(y[GeiNomDate+1,d==0])
Y1 = data.matrix(y[GeiNomDate+1,d==1])
V = diag(1/diag(var(t(y[PreTreatPeriod,])))) # Reweight by inverse of variance
lambda = c(seq(0,0.1,.0025),seq(0.1,2,.1)) # sequence of lambdas to test
estval = regsynthpath(X0,X1,Y0,Y1,V,lambda,tol=1e-6)
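# [added note] Judging from its use below, regsynthpath() returns Wsol as an
# (n_lambda x n1 x n0) array. With a single treated unit (n1 = 1), the slice Wsol[k,,]
# drops to a plain n0-vector, which %*% treats as a column; hence the t() removals below.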
MSPE = vector(length=length(lambda))
# [JR] For a single treated unit, the dimension of Wsol differs from the multiple-treated-unit case.
# Thus, for one treated unit, drop the t() (transpose) in front of estval in the MSPE loop below.
for(k in 1:length(lambda)){
MSPE[k] = mean(apply((y[324:354,d==1] - y[324:354,d==0]%*%(estval$Wsol[k,,]))^2,2,mean))
}
lambda.opt.MSPE = min(lambda[which(MSPE==min(MSPE))]) # Optimal lambda is .1-.2
lambda.opt.MSPE
min(MSPE)
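# [added sketch] To avoid hand-editing the t() calls when switching between one and several
# treated units, a hypothetical helper (not in the original script) can coerce any Wsol
# slice to an n0 x n1 matrix before multiplying:
weights_as_cols = function(W){
  if(is.null(dim(W))) matrix(W, ncol=1) else t(W) # n0-vector (one treated unit) vs. n1 x n0 matrix
}
# e.g., y[324:354,d==0] %*% weights_as_cols(estval$Wsol[k,,]) works in both cases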
# Figure 1: MSPE
pdf("plot/Geithner_MSPE_CLUSTER.pdf", width=6, height=6)
matplot(lambda, MSPE, type="b", pch=20, lwd=1,
        main=expression("MSPE, "*lambda^{opt}*" = .1, computed on 30-day window"), col="steelblue",
        xlab=expression(lambda), ylab="MSPE")
abline(v=lambda.opt.MSPE,lty=2,lwd=2,col="grey")
dev.off()
# 4. Estimation
# 4.1 Penalized Synthetic Control
Psol = estval$Wsol[which(MSPE==min(MSPE)),,]
colnames(Psol) = rownames(X[d==0,]) # [JR] might not work for a single treated unit
# Number of active controls
apply(Psol>0,1,sum) # [JR] might not work for a single treated unit
print("mean nb. active control units"); mean(apply(Psol>0,1,sum))
# Compute the statistics (see paper)
TestPeriod = (GeiNomDate-15):(GeiNomDate+30)
phiP = apply((y[TestPeriod,d==1] - y[TestPeriod,d==0]%*%(Psol)),1,mean) # [JR] took out t in front of (Psol) for a single treated unit
phiP_bc = phiP - apply(bias(X0,X1,y[TestPeriod,],d,Psol),1,mean) # bias corrected
sigma = sqrt(apply((X1 - X0%*%(Psol))^2,2,mean)) # Goodness of fit for each treated over pre-treatment period, used in the original paper
# [JR] took out t in front of (Psol) for a single treated unit
sigma_cutoff = mean(sigma) # for later use: correction during Fisher test
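# [added note] sigma_cutoff feeds the "Corrected" variant of the Fisher test in Section 5:
# placebo draws whose pre-treatment fit sigmastar exceeds alpha*sigma_cutoff get zero weight
# in the permutation average (the screening rule referred to in the original paper).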
mean(phiP_bc)
# 4.2 Non-Penalized Synthetic Control
NPsol = estval$Wsol[1,,]
colnames(NPsol) = rownames(X[d==0,]) # [JR] might not work for a single treated unit
phiNP = apply((y[TestPeriod,d==1] - y[TestPeriod,d==0]%*%(NPsol)),1,mean) # [JR] took out t in front of (NPsol) for a single treated unit
phiNP_bc = phiNP - apply(bias(X0,X1,y[TestPeriod,],d,NPsol),1,mean) # bias corrected
sigma = sqrt(apply((X1 - X0%*%(NPsol))^2,2,mean)) # [JR] took out t in front of (NPsol) for a single treated unit
sigma_cutoffNP = mean(sigma)
# Number of active controls
apply(NPsol>0,1,sum) # [JR] might not work for a single treated unit
print("mean nb. active control units"); mean(apply(NPsol>0,1,sum)) # [JR] might not work for a single treated unit
# 5. Fisher Test of the No-Effect Assumption (C=0)
set.seed(1207990)
R = 100 # Number of replications: original 20000
alpha = sqrt(3) # correction cut-off (see original paper)
lambda.set = c(seq(0,0.1,.01),seq(.2,1.5,.1)) # sequence of lambdas to test
comb <- function(x, ...) {
lapply(seq_along(x),
function(i) c(x[[i]], lapply(list(...), function(y) y[[i]])))
}
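# [added illustration] comb() zips the per-iteration result lists elementwise, so after the
# foreach loop Res_PAR[[j]] collects the j-th statistic from every replication; unlist()
# below then flattens it. A toy call:
# comb(list(list(1)), list(list(2)), list(list(3)))
# # -> list(list(1, list(2), list(3))), which unlist() flattens to c(1, 2, 3)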
cores = detectCores()
cl = makeCluster(20) # fixed at 20 workers so as not to overload the machine; reduce if you have fewer cores
registerDoParallel(cl)
t_start = Sys.time()
Res_PAR <- foreach(r = 1:R,.packages='LowRankQP',.combine='comb', .multicombine=TRUE) %dopar% {
dstar = sample(d)
X0star = data.matrix(y[PreTreatPeriod,dstar==0]); X1star = data.matrix(y[PreTreatPeriod,dstar==1]) # [JR] data.matrix() added, unlike the original
# SELECTION OF LAMBDA OPT FOR THIS ITERATION
estval = regsynthpath(X0star, X1star,Y0,Y1,V,lambda.set,tol=1e-6)
MSPE = vector(length=length(lambda.set))
for(k in 1:length(lambda.set)){
MSPE[k] = mean(apply((y[324:354,dstar==1] - y[324:354,dstar==0]%*%(estval$Wsol[k,,]))^2,2,mean))
} # [JR] took out t before (estval)
Wsolstar = estval$Wsol[which(MSPE==min(MSPE)),,] # COLLECT W(lambda.opt)
# Not corrected
ResultP = apply((y[TestPeriod,dstar==1] - y[TestPeriod,dstar==0]%*%(Wsolstar)),1,mean) # [JR] took out t before (Wsolstar)
# Corrected (as in original paper)
sigmastar = sqrt(apply((X1star - X0star%*%(Wsolstar))^2,2,mean)) # [JR] took out t before (Wsolstar)
omegastar_C = rep(1,sum(d))
omegastar_C[sigmastar > alpha*sigma_cutoff] = 0
omegastar_C = omegastar_C/sum(omegastar_C)
ResultP_C = (y[TestPeriod,dstar==1] - y[TestPeriod,dstar==0]%*%(Wsolstar))%*%omegastar_C # [JR] took out t before (Wsolstar)
# Bias correction
ResultP_BC = ResultP - apply(bias(X0star,X1star,y[TestPeriod,],dstar,Wsolstar),1,mean)
# NON-PENALIZED, LAMBDA=0
NPsolstar = estval$Wsol[1,,]
# Not corrected
ResultNP = apply((y[TestPeriod,dstar==1] - y[TestPeriod,dstar==0]%*%(NPsolstar)),1,mean) # [JR] take out t before (NPsolstar)
# Corrected
sigmastar = sqrt(apply((X1star - X0star%*%(NPsolstar))^2,2,mean)) # [JR] took out t before (NPsolstar)
omegastar_C = rep(1,sum(d))
omegastar_C[sigmastar > alpha*sigma_cutoffNP] = 0
omegastar_C = omegastar_C/sum(omegastar_C)
ResultNP_C = (y[TestPeriod,dstar==1] - y[TestPeriod,dstar==0]%*%(NPsolstar))%*%omegastar_C # [JR] took out t before (NPsolstar)
# Bias corrected
ResultNP_BC = ResultNP - apply(bias(X0star,X1star,y[TestPeriod,],dstar,NPsolstar),1,mean)
list(list(ResultP),list(ResultP_C),list(ResultP_BC),list(ResultNP),list(ResultNP_C),list(ResultNP_BC))
}
print(Sys.time()-t_start)
stopCluster(cl)
ResultP = t(matrix(unlist(Res_PAR[[1]]),ncol=R))
ResultP_C = t(matrix(unlist(Res_PAR[[2]]),ncol=R))
ResultP_BC = t(matrix(unlist(Res_PAR[[3]]),ncol=R))
ResultNP = t(matrix(unlist(Res_PAR[[4]]),ncol=R))
ResultNP_C = t(matrix(unlist(Res_PAR[[5]]),ncol=R))
ResultNP_BC = t(matrix(unlist(Res_PAR[[6]]),ncol=R))
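# [added note] Each Result* object is now an R x length(TestPeriod) matrix:
# one permutation draw per row, one event day (of the 46 in TestPeriod) per column.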
# 5.1 Tables
# Penalized / Non-Corrected
cumphiP = cumsum(phiP[16:length(phiP)])
cumResultP = t(apply(ResultP[,16:length(phiP)],1,cumsum))
cumphi_q = t(mapply(function(t) quantile(cumResultP[,t], probs = c(.005,.025,.05,.95,.975,.995)), 1:ncol(cumResultP)))
TableP = data.frame("Estimate"=cumphiP,"Q"=cumphi_q)
print("Event day 0"); print(TableP[1,])
print("Event day 10"); print(TableP[11,])
# Penalized / Corrected
cumResult_C = t(apply(ResultP_C[,16:length(phiP)],1,cumsum))
cumphi_qC = t(mapply(function(t) quantile(cumResult_C[,t], probs = c(.005,.025,.05,.95,.975,.995)), 1:ncol(cumResult_C)))
TableP_Corrected = data.frame("Estimate"=cumphiP,"Q"=cumphi_qC)
# Penalized / Bias Correction
cumphiP_BC = cumsum(phiP_bc[16:length(phiP_bc)])
cumResult_BC = t(apply(ResultP_BC[,16:length(phiP_bc)],1,cumsum))
cumphi_qBC = t(mapply(function(t) quantile(cumResult_BC[,t], probs = c(.005,.025,.05,.95,.975,.995)), 1:ncol(cumResult_BC)))
TableP_BC = data.frame("Estimate"=cumphiP_BC,"Q"=cumphi_qBC)
# Non-Penalized / Non-Corrected
cumphiNP = cumsum(phiNP[16:length(phiNP)])
cumResultNP = t(apply(ResultNP[,16:length(phiNP)],1,cumsum))
cumphi_q = t(mapply(function(t) quantile(cumResultNP[,t], probs = c(.005,.025,.05,.95,.975,.995)), 1:ncol(cumResultNP)))
TableNP = data.frame("Estimate"=cumphiNP,"Q"=cumphi_q)
# Non-Penalized / Corrected
cumResult_C = t(apply(ResultNP_C[,16:length(phiNP)],1,cumsum))
cumphi_qC = t(mapply(function(t) quantile(cumResult_C[,t], probs = c(.005,.025,.05,.95,.975,.995)), 1:ncol(cumResult_C)))
TableNP_Corrected = data.frame("Estimate"=cumphiNP,"Q"=cumphi_qC)
# Non-Penalized / Bias Correction
cumphiNP_BC = cumsum(phiNP_bc[16:length(phiNP_bc)])
cumResultNP_BC = t(apply(ResultNP_BC[,16:length(phiNP_bc)],1,cumsum))
cumphiNP_qBC = t(mapply(function(t) quantile(cumResultNP_BC[,t], probs = c(.005,.025,.05,.95,.975,.995)), 1:ncol(cumResultNP_BC)))
TableNP_BC = data.frame("Estimate"=cumphiNP_BC,"Q"=cumphiNP_qBC)
ToPrint = t(rbind(TableP[1,],TableP[11,],TableP_Corrected[1,],TableP_Corrected[11,],TableP_BC[1,],TableP_BC[11,],
TableNP[1,],TableNP[11,],TableNP_Corrected[1,],TableNP_Corrected[11,],TableNP_BC[1,],TableNP_BC[11,]))
colnames(ToPrint) = c("Penalized, NC, Day 0","Penalized, NC, Day 10","Penalized, C, Day 0","Penalized, C, Day 10","Penalized, BC, Day 0","Penalized, BC, Day 10",
"Non-Penalized, NC, Day 0","Non-Penalized, NC, Day 10","Non-Penalized, C, Day 0","Non-Penalized, C, Day 10","Non-Penalized, BC, Day 0","Non-Penalized, BC, Day 10")
stargazer(t(ToPrint))
fileConn = file("plot/GeithnerResultTable_CLUSTER.txt")
writeLines(stargazer(t(ToPrint)), fileConn)
close(fileConn)
# A. Not corrected
# Compute the .005/.025/.975/.995 quantiles of the AR for each date
phi_q = t(mapply(function(t) quantile(ResultP[,t], probs = c(.005,.025,.975,.995)), 1:length(TestPeriod))) #[JR] ResultP_BC for bias-correction
ATTdata = ts(cbind(phi_q[,1:2],phiP,phi_q[,3:4]),start=c(-15), freq=1) #[JR] phiP_bc for bias-correction
# Figure 2: Geithner connected firms effect vs. random permutations (currently in paper)
pdf("plot/GeithnerAR_FisherTest_CLUSTER.pdf", width=10, height=6)
plot(ATTdata, plot.type="single",
col=c("firebrick","firebrick","firebrick","firebrick","firebrick"), lwd=c(1,1,2,1,1),
lty=c(3,4,1,4,3),xlab="Day", ylab="AR, in pp",
ylim=c(-.15,.15))
abline(h=0,
lty=2,col="grey")
lim <- par("usr")
rect(0, lim[3], lim[2], lim[4], col = rgb(0.5,0.5,0.5,1/4))
axis(1) ## add axes back
axis(2)
box()
legend(-15,-.075,
legend=c("Estimate", ".95 confidence bands of Fisher distrib.",".99 confidence bands of Fisher distrib."),
col=c("firebrick","firebrick"), lwd=c(2,1,1),
lty=c(1,4,3))
dev.off()
# 6. .95 Confidence interval for CAR[0] and CAR[10]
GeiCI0 = conf.interval.Geithner(d,as.matrix(y[GeiNomDate,]),t(y[PreTreatPeriod,]),V,lambda=lambda.opt.MSPE,B=20000,alpha=.05)
fileConn = file("plot/outputCI0_CLUSTER.txt")
writeLines(paste(GeiCI0$c.int), fileConn)
close(fileConn)
GeiCI10 = conf.interval.Geithner(d,t(y[GeiNomDate:(GeiNomDate+10),]),t(y[PreTreatPeriod,]),V,lambda=lambda.opt.MSPE,B=20000,alpha=.05)
fileConn = file("plot/outputCI10_CLUSTER.txt")
writeLines(paste(GeiCI10$c.int), fileConn)
close(fileConn)
# Permutation distributions
# [JR] modified for one treated unit
# The functions below assume some objects are already defined above (e.g., V, Psol, d)
### Auxiliary function: [JR] modified from L'Hour's original code.
permutation.iter.C=function(d,y,V,lambda.perm.set){
dstar=sample(d)
SX0=data.matrix(y[PreTreatPeriod,dstar==0])
SX1=data.matrix(y[PreTreatPeriod,dstar==1])
SSY0=data.matrix(y[GeiNomDate+1,dstar==0])
SSY1=data.matrix(y[GeiNomDate+1,dstar==1])
SY0=data.matrix(y[TestPeriod,dstar==0])
SY1=data.matrix(y[TestPeriod,dstar==1])
# SELECTION OF LAMBDA OPT FOR THIS ITERATION
lambda.perm.set = c(seq(0,0.1,.01),seq(.2,1.5,.1)) # sequence of lambdas to test (overrides the function argument)
estval=regsynthpath(SX0,SX1,SSY0,SSY1,V,lambda.perm.set,tol=1e-6)
MSPE = vector(length=length(lambda.perm.set))
for(k in 1:length(lambda.perm.set)){
MSPE[k] = mean(apply((y[324:354,dstar==1] - y[324:354,dstar==0]%*%(estval$Wsol[k,,]))^2,2,mean))
} # [Jay Ryu] take out t before (estval) - do the same in all the relevant places.
Wsol_perm = estval$Wsol[which(MSPE==min(MSPE)),,] # COLLECT W(lambda.opt)
MSPE_post=mean(apply((SY1-SY0%*%(Wsol_perm))^2,2,mean))
MSPE_pre=mean(apply((SX1-SX0%*%(Wsol_perm))^2,2,mean))
ratio_star=MSPE_post/MSPE_pre
ATT_star=mean(apply((SY1-SY0%*%(Wsol_perm)),1,mean))
return(ratio_star)
}
perm.test=function(d,y,V,lambda.perm.set,R=1000){
PX0=data.matrix(y[PreTreatPeriod,d==0])
PX1=data.matrix(y[PreTreatPeriod,d==1])
PY0=data.matrix(y[TestPeriod,d==0])
PY1=data.matrix(y[TestPeriod,d==1]) #use Psol for the following
MSPE_po_A=mean(apply((PY1-PY0%*%(Psol))^2,2,mean))
MSPE_pr_A=mean(apply((PX1-PX0%*%(Psol))^2,2,mean))
ratio=MSPE_po_A/MSPE_pr_A
ATT=mean(apply((PY1-PY0%*%(Psol)),1,mean))
theta.reshuffled =replicate(R,permutation.iter.C(d,y,V,lambda.perm.set),simplify = "vector")
#compute p-value
p.val=mean(abs(theta.reshuffled)>=abs(ratio))
print(paste("p_value:",p.val))
return(list(p.val=p.val,theta.obs=ATT))
}
lambda.perm.set = c(seq(0,0.1,.01),seq(.2,1.5,.1)) # [JR] must be defined before the call (it is redefined inside permutation.iter.C anyway)
perm.test(d,y,V,lambda.perm.set,10) # [JR] R=10 here to shorten running time.
# Permutation distributions -- Bias Corrected
# [JR] modified for one treated unit
# The functions below assume some objects are already defined above (e.g., V, Psol, d)
### Auxiliary function: [Jay Ryu] modified from L'Hour's original code.
permutation.iter.C_BC=function(d,y,V,lambda.perm.set){
dstar=sample(d)
SX0=data.matrix(y[PreTreatPeriod,dstar==0])
SX1=data.matrix(y[PreTreatPeriod,dstar==1])
SSY0=data.matrix(y[GeiNomDate+1,dstar==0])
SSY1=data.matrix(y[GeiNomDate+1,dstar==1])
SY0=data.matrix(y[TestPeriod,dstar==0])
SY1=data.matrix(y[TestPeriod,dstar==1])
SX=data.matrix(y[PreTreatPeriod,])
SY=data.matrix(y[TestPeriod,])
# SELECTION OF LAMBDA OPT FOR THIS ITERATION
lambda.perm.set = c(seq(0,0.1,.01),seq(.2,1.5,.1)) # sequence of lambdas to test (overrides the function argument)
estval=regsynthpath(SX0,SX1,SSY0,SSY1,V,lambda.perm.set,tol=1e-6)
MSPE = vector(length=length(lambda.perm.set))
for(k in 1:length(lambda.perm.set)){
MSPE[k] = mean(apply((y[324:354,dstar==1] - y[324:354,dstar==0]%*%(estval$Wsol[k,,]))^2,2,mean))
} # [Jay Ryu] take out t before (estval) - do the same in all the relevant places.
Wsol_perm_BC = estval$Wsol[which(MSPE==min(MSPE)),,] # COLLECT W(lambda.opt)
tau_post=SY1-SY0%*%(Wsol_perm_BC)
tau_post_BC=tau_post - bias(SX0, SX1, SY, dstar, Wsol_perm_BC)
tau_pre=SX1-SX0%*%(Wsol_perm_BC)
tau_pre_BC=tau_pre - bias(SX0,SX1,SX,dstar,Wsol_perm_BC)
MSPE_post_BC=mean(apply((tau_post_BC)^2,2,mean))
MSPE_pre_BC=mean(apply((tau_pre_BC)^2,2,mean))
ratio_star_BC= MSPE_post_BC / MSPE_pre_BC
phiP_perm = apply((SY1-SY0%*%(Wsol_perm_BC)),1,mean) # [Jay Ryu] took out t in front of (Wsol_perm_BC) for a single treated unit
ATT_star_BC = mean(phiP_perm - apply(bias(SX0,SX1,SY,dstar,Wsol_perm_BC),1,mean))
return(ratio_star_BC)
}
perm.test_BC=function(d,y,V,lambda.perm.set,R=1000){
PX0=data.matrix(y[PreTreatPeriod,d==0])
PX1=data.matrix(y[PreTreatPeriod,d==1])
PY0=data.matrix(y[TestPeriod,d==0])
PY1=data.matrix(y[TestPeriod,d==1]) #use Psol for the following
PX=data.matrix(y[PreTreatPeriod,])
PY=data.matrix(y[TestPeriod,])
tau_A_po=PY1-PY0%*%(Psol)
tau_A_po_BC=tau_A_po - bias(PX0, PX1, PY, d, Psol)
tau_A_pr=PX1-PX0%*%(Psol)
tau_A_pr_BC=tau_A_pr - bias(PX0,PX1,PX,d,Psol)
MSPE_po_A_BC=mean(apply((tau_A_po_BC)^2,2,mean))
MSPE_pr_A_BC=mean(apply((tau_A_pr_BC)^2,2,mean))
ratio_BC=MSPE_po_A_BC/MSPE_pr_A_BC
phiP_perm_A = apply((PY1-PY0%*%(Psol)),1,mean) # [Jay Ryu] took out t in front of (Psol) for a single treated unit
ATT_BC = mean(phiP_perm_A - apply(bias(PX0,PX1,PY,d,Psol),1,mean))
theta.reshuffled_BC =replicate(R,permutation.iter.C_BC(d,y,V,lambda.perm.set),simplify = "vector")
#compute p-value
p.val=mean(abs(theta.reshuffled_BC)>=abs(ratio_BC))
print(paste("p_value_BC:",p.val))
return(list(p.val_BC=p.val,theta.obs_BC=ATT_BC))
}
perm.test_BC(d,y,V,lambda.perm.set,30) # [JR] R=30 here to shorten running time.