Last updated: 2020-01-30


random rotation

Select a random rotation matrix via out-of-bag (OOB) accuracy: for each tree, draw several candidate rotations, fit a classification tree on each rotated bootstrap sample, and keep the rotation whose tree scores highest on the out-of-bag observations.

library(rpart)
library(BART)
Loading required package: nlme
Loading required package: nnet
Loading required package: survival
# Convert a factor whose levels are numbers (e.g. "0"/"1") back to numeric
# values; as.numeric() alone would return the internal level codes.
as.numeric.factor <- function(x) {as.numeric(levels(x))[x]}
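A quick illustration of why the helper is needed (the factor values here are made up): as.numeric() on a factor returns the level codes, not the original values.

f <- factor(c(0, 1, 1, 0))
as.numeric(f)          # 1 2 2 1 -- internal level codes
as.numeric.factor(f)   # 0 1 1 0 -- the intended numeric labels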

# Draw a random p x p rotation matrix as the Q factor of a QR decomposition
# of a Gaussian random matrix. If screening = TRUE, variable j is retained
# with probability r[j] and the rows of dropped variables are zeroed out.
random_rotation = function(p,screening,r){
  Q = qr.Q(qr(matrix(rnorm(p^2),p,p)))
  if(screening){
    active.var = rbinom(p,1,r)
    # zero out the rows of variables that were not selected
    Q[active.var == 0, ] = 0
  }
  Q
}
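As a quick check (illustrative, not part of the original analysis): without screening the function returns an orthogonal matrix, so crossprod(Q) should equal the identity up to floating-point error.

Q <- random_rotation(5, screening = FALSE)
max(abs(crossprod(Q) - diag(5)))  # should be ~1e-15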

# Fit a classification tree on several random rotations of the data and keep
# the rotation with the best out-of-bag accuracy; the identity (no rotation)
# serves as the baseline.
rotate_tree = function(x,y,x.oob,y.oob,n_rotate,control,screening,r){

  p = ncol(x)
  n = nrow(x)

  # baseline: unrotated tree
  fit = rpart(y~.,data = data.frame(y=y,x=x),method='class',control = control)
  ypred = predict(fit,data.frame(x=x.oob),type='class')

  best.accuracy = sum(ypred==y.oob)/length(y.oob)
  best.fit = fit
  best.Rotation = 'identity'
  best.yhat = predict(fit,data.frame(x=x),type='class')
  
  for(rot in 1:n_rotate){
    Rmat = random_rotation(p,screening,r)
    if(sum(Rmat)!=0){  # skip all-zero matrices (no variable survived screening)
      x.train = x%*%Rmat
      x.test = x.oob%*%Rmat
      fit = rpart(y~.,data = data.frame(y=y,x=x.train), method='class',control = control)
      ypred = predict(fit,data.frame(x=x.test),type='class')
      accuracy = sum(ypred==y.oob)/length(ypred)
      if(accuracy > best.accuracy){
        best.accuracy = accuracy
        best.fit = fit
        best.Rotation = Rmat
        best.yhat = predict(fit,data.frame(x=x.train),type='class')
      }
    }
  }
  return(list(best.accuracy = best.accuracy,
      best.fit = best.fit,
      best.Rotation = best.Rotation,
      best.yhat = best.yhat))
}
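A minimal usage sketch (the data, split, and parameter values here are made up for illustration): fit one rotated tree on an in-bag/out-of-bag split of simulated data.

set.seed(1)
xs = matrix(rnorm(100*5),100,5)
ys = as.factor(rbinom(100,1,0.5))
in_bag = sample(100,63)
fit1 = rotate_tree(xs[in_bag,],ys[in_bag],xs[-in_bag,],ys[-in_bag],
                   n_rotate = 10,control = rpart.control(),
                   screening = TRUE,r = rep(0.5,5))
fit1$best.accuracy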

# Random-rotation random forest: each tree is grown on a bootstrap sample of
# rows and a random subset of mtry columns, after choosing the best of
# n_rotate candidate rotations by OOB accuracy.
rrrforest = function(x,y,xtest=NULL,ytest=NULL,ntree=100, mtry=floor(sqrt(ncol(x))),
                     replace=TRUE,samplesize = if (replace) nrow(x) else ceiling(.632*nrow(x)),
                     control = rpart.control(),screening = TRUE,eb.screen = TRUE,
                     delta=max(c(0,(1+log(ncol(x)/nrow(x)))/2)),
                     keepforest = FALSE,n_rotate=30,printevery=100){
  x = as.matrix(x)
  if(!is.null(xtest)) xtest = as.matrix(xtest)
  y = as.factor(y)
  if(!is.null(ytest)) ytest = as.factor(ytest)

  p = ncol(x)
  n = nrow(x)

  if (mtry >= p) stop("mtry should be smaller than the number of columns in x")
  
  forest = list()
  
  if(screening){
    # per-variable difference in class means
    ms.idx = which(y==1)
    ms = apply(x,2,function(z){mean(z[ms.idx])-mean(z[-ms.idx])})
    if(eb.screen){
      # shrink the mean differences with an empirical Bayes point-normal prior
      sds = sqrt(apply(x,2,function(z){var(z[ms.idx])/length(ms.idx)+var(z[-ms.idx])/(n-length(ms.idx))}))
      pn.res = ebnm::ebnm(ms,sds,'point_normal')$posterior$mean
      r = abs(pn.res)/max(abs(pn.res))
    }else{
      r = abs(ms)/max(abs(ms))
    }
    # temper the weights; delta = 0 makes all variables equally likely
    r = r^delta
  }else{
    r = rep(1,p)
  }

  yhat = matrix(nrow=n,ncol=ntree)
  ypred = matrix(nrow=n,ncol=ntree)
  oob_accuracy = c()
  
  for(t in 1:ntree){

    if(t%%printevery==0){print(sprintf("done %d (out of %d)",t,ntree))}

    # select candidate variables for this tree
    var_idx = sample(1:p,mtry)
    # bootstrap rows; the left-out rows form the OOB set
    sample_idx = sample(1:n,samplesize,replace = replace)

    x_t = x[sample_idx,var_idx]
    y_t = y[sample_idx]
    x_t_oob = x[-unique(sample_idx),var_idx]
    y_t_oob = y[-unique(sample_idx)]

    tree.fit = rotate_tree(x_t,y_t,x_t_oob,y_t_oob,n_rotate=n_rotate,control=control,
                           screening=screening,r=r[var_idx])
    
    
    oob_accuracy[t] = tree.fit$best.accuracy
    yhat[sample_idx,t] = as.numeric.factor(tree.fit$best.yhat)
    
    if(!is.null(xtest)){
      if(is.character(tree.fit$best.Rotation)){ # the identity rotation won
        ypred[,t] = as.numeric.factor(predict(tree.fit$best.fit, data.frame(x=xtest[,var_idx]),type='class'))
      }else{
        ypred[,t] = as.numeric.factor(predict(tree.fit$best.fit, data.frame(x=xtest[,var_idx]%*%tree.fit$best.Rotation),type='class'))
      }
    }
    
    if(keepforest){
      forest[[t]] = tree.fit
    }
    
  }
  # majority vote across trees (NA entries are rows a given tree never saw)
  vote_hat = 1*(apply(yhat,1,mean,na.rm=TRUE) > 0.5)
  vote_pred = 1*(apply(ypred,1,mean,na.rm=TRUE) > 0.5)

  accuracy_train = sum(y == vote_hat)/n
  accuracy_test = if(is.null(ytest)) NA else sum(ytest==vote_pred)/length(ytest)
  
  return(list(accuracy_train=accuracy_train,
              accuracy_test =accuracy_test, 
              yhat = yhat, 
              ypred = ypred, 
              vote_hat = vote_hat,
              vote_pred = vote_pred,
              oob_accuracy = oob_accuracy,
              forest = forest))
}

experiment

# Probit-type data-generating processes: the label is Bernoulli with success
# probability pnorm(signal), with a linear or Friedman-style signal.
f_linear = function(x){
  ey = -2.50*x[,1] + 2.0*x[,2] - 1.60*x[,3] + 1.2*x[,4] + 0.9*x[,5]
  as.factor(rbinom(nrow(x),1,pnorm(ey)))
}

f_friedman = function(x){
  ey = sin(pi*x[,1]*x[,2]) + 2*(x[,3]-.5)^2 + x[,4] - 2*x[,5]
  as.factor(rbinom(nrow(x),1,pnorm(ey)))
}

set.seed(1234)
n=100
p=100
x = matrix(runif(n*p),nrow=n)
y = f_linear(x)

xtest = matrix(runif(n*p),nrow=n)
ytest = f_linear(xtest)

# normalized absolute mean differences between the two classes, per variable
md = apply(x,2,function(z){abs(mean(z[y==1])-mean(z[y==0]))})
round(md/max(md),2)

# normalized absolute correlations between each variable and the label
cors = abs(apply(x,2,cor,as.numeric.factor(y)))
round(cors/max(cors),2)


# shrink the raw mean differences with an empirical Bayes point-normal prior
ms = apply(x,2,function(z){mean(z[y==1])-mean(z[y==0])})

sds = sqrt(apply(x,2,function(z){var(z)/sum(as.numeric.factor(y))+var(z)/(n-sum(as.numeric.factor(y)))}))

library(ebnm)

pn.res = ebnm(ms,sds,'point_normal')

round(abs(pn.res$posterior$mean)/max(abs(pn.res$posterior$mean)),2)


library(randomForest)
library(rotationForest)
library(pROC)

rf = randomForest(x,y,xtest,ytest,ntree=100)
rrrf = rrrforest(x,y,xtest,ytest,ntree=100,n_rotate = 30,replace = FALSE)
rrf = rotationForest(data.frame(x),y,L=100)
# rprforest is defined in the random projection section below; that chunk
# must be run before this one
rprf = rprforest(x,y,xtest,ytest)


# AUC of each method's test-set predictions (hard 0/1 labels, so the ROC has
# a single operating point)
roc_obj <- roc(as.numeric.factor(ytest), as.numeric.factor(rf$test$predicted))
auc(roc_obj)

# proportion of matching entries between two label vectors
accuracy_calc = function(a,b){
  sum(a==b)/length(a)
}
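For instance (illustrative), it can be applied to the majority-vote test predictions computed above:

accuracy_calc(as.numeric.factor(ytest), rrrf$vote_pred)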

roc_obj <- roc(as.numeric.factor(ytest), rrrf$vote_pred)
auc(roc_obj)

roc_obj <- roc(as.numeric.factor(ytest), 1*(predict(rrf,data.frame(xtest)) > 0.5))
auc(roc_obj)



# test error of the cumulative majority vote as trees are added
ypred = rrrf$ypred
err.rate = c()

for(i in 1:ncol(ypred)){
  err.rate[i] = sum(ytest != (1*(apply(ypred[,1:i,drop=FALSE],1,mean) > 0.5)))/length(ytest)
}

# test accuracy vs. number of trees: randomForest (black) and rrrforest (blue)
plot(1-rf$test$err.rate[,1],type='l',xlab='number of trees',ylab='test accuracy')
lines(1-err.rate,col=4)

# correlation between per-tree OOB accuracy and per-tree test accuracy
acc.rate = c()
for(i in 1:ncol(ypred)){
  acc.rate[i] = sum(ytest == ypred[,i])/length(ytest)
}

cor(acc.rate,rrrf$oob_accuracy)

random projection

Select a sparse random projection matrix via OOB accuracy, analogously to the random rotation forest above.

# generate a sparse random projection matrix
#'@param p,q dimensions of the projection matrix (p x q)
#'@param r a vector of length p of variable inclusion probabilities
#'@param s sparsity parameter in the sparse random projection: entries are
#' +/- sqrt(s) with probability 1/(2s) each and 0 otherwise, see
#' https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf

random_projection = function(p,q,r,s){
  active.var = rbinom(p,1,r)
  rp.mat = matrix(sqrt(s)*rbinom(p*q,1,1/s)*(2*rbinom(p*q,1,0.5)-1),nrow=p)
  # zero out the rows of variables that were not selected
  rp.mat[active.var == 0, ] = 0
  rp.mat
}
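A quick sanity check (illustrative, not part of the original analysis): with all variables active (r = 1), each entry of the matrix has mean 0 and variance 1, so projection preserves squared distances on average up to a factor of q, in the spirit of the Johnson-Lindenstrauss lemma.

set.seed(2)
p = 200; q = 50
xa = rnorm(p); xb = rnorm(p)
R = random_projection(p,q,r = rep(1,p),s = 3)
# ratio of projected to original squared distance, rescaled by q; roughly 1
sum(((xa-xb)%*%R)^2)/(q*sum((xa-xb)^2))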


# Fit a classification tree on several sparse random projections of the data
# and keep the projection with the best OOB accuracy.
project_tree = function(x,y,x.oob,y.oob,n_project,control,r,q,s){

  p = ncol(x)
  n = nrow(x)

  # baseline: the first random projection
  rp1 = random_projection(p,q,r,s)

  fit = rpart(y~.,data = data.frame(y=y,x=x%*%rp1),method='class',control = control)
  ypred = predict(fit,data.frame(x=x.oob%*%rp1),type='class')

  best.accuracy = sum(ypred==y.oob)/length(y.oob)
  best.fit = fit
  best.project = rp1
  best.yhat = predict(fit,data.frame(x=x%*%rp1),type='class')
  
  for(rot in 1:n_project){
    Rmat = random_projection(p,q,r,s)
    x.train = x%*%Rmat
    x.test = x.oob%*%Rmat
    fit = rpart(y~.,data = data.frame(y=y,x=x.train), method='class',control = control)
    ypred = predict(fit,data.frame(x=x.test),type='class')
    accuracy = sum(ypred==y.oob)/length(ypred)
    if(accuracy > best.accuracy){
      best.accuracy = accuracy
      best.fit = fit
      best.project = Rmat
      best.yhat = predict(fit,data.frame(x=x.train),type='class')
    }
  }
  return(list(best.accuracy = best.accuracy,
      best.fit = best.fit,
      best.project = best.project,
      best.yhat = best.yhat))
}


# Random-projection random forest. q is the projection dimension and may be a
# vector, in which case each tree samples one value; delta tempers the
# variable weights r via r^delta.
rprforest = function(x,y,xtest=NULL,ytest=NULL,ntree=100,mtry=floor(sqrt(ncol(x))),
                     replace=TRUE,samplesize = if (replace) nrow(x) else ceiling(.632*nrow(x)),
                     control = rpart.control(),
                     keepforest = FALSE,n_project=30,printevery=100,
                     q=round(seq(log(ncol(x)),mtry,length.out = ntree/10)),
                     s=3,delta=max(c(0,(1+log(ncol(x)/nrow(x)))/2))){
  x = as.matrix(x)
  if(!is.null(xtest)) xtest = as.matrix(xtest)
  y = as.factor(y)
  if(!is.null(ytest)) ytest = as.factor(ytest)
  
  p = ncol(x)
  n = nrow(x)
  
  if (mtry >= p) stop("mtry should be smaller than the number of columns in x")
  
  forest = list()
  
  # variable weights from per-variable differences in class means
  ms.idx = y==1
  ms = apply(x,2,function(z){abs(mean(z[ms.idx])-mean(z[-ms.idx]))})

  r = ms/max(ms)
  r = r^delta
  
  yhat = matrix(nrow=n,ncol=ntree)
  ypred = matrix(nrow=n,ncol=ntree)
  oob_accuracy = c()
  
  for(t in 1:ntree){

    if(t%%printevery==0){print(sprintf("done %d (out of %d)",t,ntree))}

    # select candidate variables for this tree
    var_idx = sample(1:p,mtry)
    # bootstrap rows; the left-out rows form the OOB set
    sample_idx = sample(1:n,samplesize,replace = replace)

    x_t = x[sample_idx,var_idx]
    y_t = y[sample_idx]
    x_t_oob = x[-unique(sample_idx),var_idx]
    y_t_oob = y[-unique(sample_idx)]

    # pick this tree's projection dimension
    if(length(q)>1){
      qt = sample(q,1)
    }else{
      qt = q
    }

    tree.fit = project_tree(x_t,y_t,x_t_oob,y_t_oob,n_project=n_project,control=control,r=r[var_idx],q=qt,s=s)
    
    
    oob_accuracy[t] = tree.fit$best.accuracy
    yhat[sample_idx,t] = as.numeric.factor(tree.fit$best.yhat)
    
    if(!is.null(xtest)){
      ypred[,t] = as.numeric.factor(predict(tree.fit$best.fit, data.frame(x=xtest[,var_idx]%*%tree.fit$best.project),type='class'))
    }
    
    if(keepforest){
      forest[[t]] = tree.fit
    }
    
  }
  # majority vote across trees
  vote_hat = 1*(apply(yhat,1,mean,na.rm=TRUE) > 0.5)
  vote_pred = 1*(apply(ypred,1,mean,na.rm=TRUE) > 0.5)

  accuracy_train = sum(y == vote_hat)/n
  accuracy_test = if(is.null(ytest)) NA else sum(ytest==vote_pred)/length(ytest)
  
  return(list(accuracy_train=accuracy_train,
              accuracy_test =accuracy_test, 
              yhat = yhat, 
              ypred = ypred, 
              vote_hat = vote_hat,
              vote_pred = vote_pred,
              oob_accuracy = oob_accuracy,
              forest = forest))
}
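For concreteness, here is what the default tuning values evaluate to under this document's experiment settings (n = p = 100, ntree = 100); the commented values are the results of the expressions.

floor(sqrt(100))                           # mtry = 10
round(seq(log(100), 10, length.out = 10))  # q = 5 5 6 6 7 8 8 9 9 10
max(c(0, (1 + log(100/100))/2))            # delta = 0.5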

sessionInfo()
R version 3.6.1 (2019-07-05)
Platform: x86_64-apple-darwin15.6.0 (64-bit)
Running under: macOS High Sierra 10.13.6

Matrix products: default
BLAS:   /Library/Frameworks/R.framework/Versions/3.6/Resources/lib/libRblas.0.dylib
LAPACK: /Library/Frameworks/R.framework/Versions/3.6/Resources/lib/libRlapack.dylib

locale:
[1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8

attached base packages:
[1] stats     graphics  grDevices utils     datasets  methods   base     

other attached packages:
[1] BART_2.7          survival_2.44-1.1 nnet_7.3-12       nlme_3.1-141     
[5] rpart_4.1-15     

loaded via a namespace (and not attached):
 [1] Rcpp_1.0.2      knitr_1.25      whisker_0.4     magrittr_1.5   
 [5] workflowr_1.5.0 splines_3.6.1   lattice_0.20-38 R6_2.4.0       
 [9] rlang_0.4.0     stringr_1.4.0   tools_3.6.1     parallel_3.6.1 
[13] grid_3.6.1      xfun_0.10       git2r_0.26.1    htmltools_0.4.0
[17] yaml_2.2.0      digest_0.6.21   rprojroot_1.3-2 Matrix_1.2-17  
[21] later_1.0.0     promises_1.1.0  fs_1.3.1        glue_1.3.1     
[25] evaluate_0.14   rmarkdown_1.16  stringi_1.4.3   compiler_3.6.1 
[29] backports_1.1.5 httpuv_1.5.2