library(mvtnorm)

library(class)

getwd()

# Normalize data (by subtracting the mean and dividing by the SD of RefData)

normalizeData = function(DataToNormalize, RefData) {
  normalizedData = DataToNormalize
  for (n in 1:ncol(DataToNormalize)) {
    normalizedData[, n] = (DataToNormalize[, n] - mean(RefData[, n])) / sd(RefData[, n])
  }
  return(normalizedData)
}
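
# Illustrative sketch (hypothetical data frames XtrainRaw/XtestRaw): normalize both
# training and test predictors against the training data's means and SDs.
# XtrainRaw  = data.frame(x1 = rnorm(100), x2 = rnorm(100, sd = 2))
# XtestRaw   = data.frame(x1 = rnorm(50),  x2 = rnorm(50,  sd = 2))
# XtrainNorm = normalizeData(XtrainRaw, XtrainRaw)
# XtestNorm  = normalizeData(XtestRaw,  XtrainRaw)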

# Calculate initial scaling factors R0s (S0s = p-values)

InitialRs = function(X, eta, S0s) {
  K = length(S0s); R0s = rep(0, K)
  for (k in 1:K) {
    # force R0s < 12 to avoid overflow in s = exp(d)
    R0s[k] = min((-log(S0s[k]))^(1/eta) / max(IQR(X[, k]), 0.0000001), 12)
  }
  return(R0s)
}

Distance = function(Rs, x1, x2) {
  # 3 vectors: Rs = scaling factors, x1 = point 1, x2 = point 2
  K = length(Rs)
  d = 0; for (k in 1:K) { d = d + (Rs[k] * (x2[k] - x1[k]))^2 }
  return(d^0.5)
}

# Calculate similarity scores among training subjects.

SimilarityTrain = function(eta, Rs, Xtrain) {
  N1 = nrow(Xtrain); K = length(Rs); S = matrix(0, nrow=N1, ncol=N1)
  for (i in 1:N1) { for (j in i:N1) {
    d2 = 0
    for (k in 1:K) { d2 = d2 + (Rs[k] * (Xtrain[i,k] - Xtrain[j,k]))^2 }
    S[i,j] = exp(-d2^0.5)
    S[j,i] = S[i,j]   # Use symmetry to cut CPU time by 50%.
  } }   # End of j and i loops
  return(S)
}
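
# Quick sanity check (illustrative; Xtmp is a placeholder): S should be symmetric
# with 1s on the diagonal, since each point is at distance 0 from itself.
# Xtmp = matrix(rnorm(20), nrow = 5)
# Stmp = SimilarityTrain(eta = 1, Rs = rep(1, 4), Xtrain = Xtmp)
# all(diag(Stmp) == 1); isTRUE(all.equal(Stmp, t(Stmp)))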

# Calculate similarity scores between training and test subjects.

Similarity = function(eta, Rs, Xtrain, Xtest) {
  N1 = nrow(Xtrain); N2 = nrow(Xtest); K = length(Rs)
  S = matrix(0, nrow=N2, ncol=N1)
  for (i in 1:N2) { for (j in 1:N1) {
    d2 = 0
    for (k in 1:K) { d2 = d2 + (Rs[k] * (Xtest[i,k] - Xtrain[j,k]))^2 }
    S[i,j] = exp(-d2^0.5)
  } }   # End of j and i loops
  return(S)
}

Weight = function(S) {
  N1 = nrow(S); N2 = ncol(S)
  W = matrix(0, nrow=N1, ncol=N2)
  for (i in 1:N1) {
    sum_S_row = sum(S[i, ])
    for (j in 1:N2) { W[i,j] = S[i,j] / max(sum_S_row, 0.00000000001) }
  }
  return(W)
}   # End of Weight

PredictedY = function(W, Ytrain, Ytest) {
  # Calculate predicted outcomes and error.
  OutObj = list()
  OutObj$predY = W %*% Ytrain   # For a binary outcome, predY = probability of being 1.
  # Mean squared error
  OutObj$MSE = mean((OutObj$predY - Ytest)^2)
  return(OutObj)
}   # End of PredictedY
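
# Illustrative composition for test-set prediction (all names are placeholders
# defined elsewhere): similarities between test and training subjects are
# row-normalized into weights, and the weighted training outcomes give predY.
# Stest = Similarity(eta, Rs, XtrainNorm, XtestNorm)   # N2 x N1 similarities
# Wtest = Weight(Stest)                                # rows sum to 1
# Ptest = PredictedY(Wtest, Ytrain, Ytest)             # Ptest$predY, Ptest$MSE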

DerivativeE = function(eta, predY, Rs, X, S, O) {
  # Derivatives of the error term E of the loss function.
  N = nrow(X); K = length(Rs)
  der_S = matrix(0, nrow=K*N, ncol=N); der_W = matrix(0, nrow=K*N, ncol=N)
  dist = matrix(0, nrow=N, ncol=N); der_E = rep(0, K)
  for (i in 1:N) { for (j in 1:N) {
    d2 = 0
    for (k in 1:K) { d2 = d2 + (Rs[k] * (X[i,k] - X[j,k]))^2 }
    dist[i,j] = max(d2^0.5, 0.0000001)
  } }
  # Similarity derivatives
  for (m in 1:K) { for (i in 1:N) { for (j in 1:N) {
    der_d = (Rs[m] / dist[i,j]) * (X[i,m] - X[j,m])^2
    der_S[(m-1)*N+i, j] = -1 * S[i,j] * eta * (dist[i,j])^(eta-1) * der_d
  } } }   # End of j, i, and m loops
  # Weight derivatives
  for (m in 1:K) { for (i in 1:N) {
    sum_der_S = sum(der_S[(m-1)*N+i, ]); sumSi = sum(S[i, ])
    for (j in 1:N) {
      der_W[(m-1)*N+i, j] = der_S[(m-1)*N+i, j] / sumSi - S[i,j] * sum_der_S / sumSi^2
    }
  } }   # End of j, i, and m loops
  # Derivatives of E
  for (m in 1:K) { for (i in 1:N) {
    err = (predY[i] - O[i])
    for (j in 1:N) { der_E[m] = der_E[m] + 2/N * err * O[j] * der_W[(m-1)*N+i, j] }
  } }   # End of i and m loops
  return(der_E)
}   # End of DerivativeE

Learning = function(LearningRate, Lamda, Rs, der_E) {
  # Update scaling factors, Rs.
  K = length(Rs)
  der_lossFun = der_E + 2*Lamda*Rs   # derivatives of the loss function
  Rs = Rs - LearningRate * der_lossFun
  # force Rs < 25 to avoid overflow in S = exp(d)
  for (m in 1:K) { Rs[m] = min(max(0, Rs[m]), 25) }
  return(Rs)
}   # End of Learning

# Training AI to obtain scaling factors, Rs ###

SBMLtrain = function(Epoch, S0, Lamda, LearningRate, eta, Xtrain, Ytrain) {
  TrainObj = list(); OutObj0 = list(); OutObj = list()
  R0 = InitialRs(Xtrain, eta, S0)
  S = SimilarityTrain(eta, R0, Xtrain)
  OutObj0 = PredictedY(Weight(S), Ytrain, Ytrain)
  Rs = R0; OutObj = OutObj0   # in case Epoch = 0 (no learning)
  TrainMSE0 = OutObj0$MSE
  preLoss = OutObj0$MSE + Lamda*sum(Rs^2)
  # Learning
  iter = 0
  while (iter < Epoch) {
    preRs = Rs
    Rs = Learning(LearningRate, Lamda, Rs,
                  DerivativeE(eta, OutObj$predY, Rs, Xtrain, S, Ytrain))
    S = SimilarityTrain(eta, Rs, Xtrain)   # recompute similarities at the updated Rs
    OutObj = PredictedY(Weight(S), Ytrain, Ytrain)
    iter = iter + 1
    Loss = OutObj$MSE + Lamda*sum(Rs^2)
    if (Loss > preLoss) { Rs = preRs; iter = Epoch + 1 }   # stop if the loss increases
    preLoss = Loss
  }
  TrainObj$Rs = Rs; TrainObj$Y = OutObj$predY; TrainObj$MSE = OutObj$MSE
  return(TrainObj)
}
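
# End-to-end sketch with simulated data (illustrative only; the sample sizes,
# tuning values, and variable names below are assumptions, not part of the method):
# set.seed(1)
# Xall = rmvnorm(150, mean = rep(0, 4), sigma = diag(4))
# Yall = Xall[, 1] + 0.5 * Xall[, 2] + rnorm(150, sd = 0.5)
# XtrRaw = as.data.frame(Xall[1:100, ]);   Ytr = Yall[1:100]
# XteRaw = as.data.frame(Xall[101:150, ]); Yte = Yall[101:150]
# XtrN = normalizeData(XtrRaw, XtrRaw); XteN = normalizeData(XteRaw, XtrRaw)
# S0s  = rep(0.05, 4)   # placeholder p-values; GLMPv below can supply real ones
# fit  = SBMLtrain(Epoch = 20, S0 = S0s, Lamda = 0.01, LearningRate = 0.1,
#                  eta = 1, Xtrain = XtrN, Ytrain = Ytr)
# pred = PredictedY(Weight(Similarity(1, fit$Rs, XtrN, XteN)), Ytr, Yte)
# pred$MSE   # test-set mean squared error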

# Linear (or logistic) regression to obtain per-predictor p-values.
# outcome = "gaussian" or "binomial", ...

GLMPv = function(Y, X, outcome) {
  LMfull = glm(Y ~ ., data=X, family=outcome)
  sumLM = coef(summary(LMfull))
  pVals = sumLM[, 4][(1:ncol(X)) + 1]   # skip the intercept row
  # pVals <- summary(LMfull)$coef[, "Pr(>|t|)"]
  return(pVals)
}
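
# Illustrative use of GLMPv to seed the scaling factors (placeholder names from
# the sketch above): the per-predictor p-values play the role of S0s in SBMLtrain.
# pv  = GLMPv(Ytr, XtrRaw, outcome = "gaussian")
# fit = SBMLtrain(Epoch = 20, S0 = pv, Lamda = 0.01, LearningRate = 0.1,
#                 eta = 1, Xtrain = XtrN, Ytrain = Ytr)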