## Econometrics and Statistics

# This script makes heavy use of the "desk" package.
# Load the desk package and clear the current environment.
library(desk)
rm.all()

## simple LS-estimate [KQ-Schaetzung] ----

# Generate some sample data: one exogenous and one endogenous variable.
x <- c(5, 10, 15, 20, 25)
y <- c(0.5, 2.5, 2.5, 4.5, 5)

# Optional: scatter plot of the raw data for illustration.
plot(x, y,
     main = "Sample Data",
     xlab = "exogenous Variable",
     ylab = "endogenous Variable")

# Least-squares estimate (German: KQ-Schaetzung).
# Assumed model: y = alpha + (beta * x) + u
sdata.est <- ols(y ~ x)
sdata.est

# Inspect the two estimated coefficients individually.
sdata.est$coef[1] # alpha (intercept)
sdata.est$coef[2] # beta (slope)

# Optional: draw the fitted regression line into the existing plot.
abline(a = sdata.est$coef[1], b = sdata.est$coef[2], col = "red")

## Significance test ----

## t-test

# Hypothesis test for the significance of beta (t-test).
# Assumption: beta is significant; the test checks the opposite.
# H0: "beta = 0", significance level is 5%.

# Using the desk package:
sdata.t <- t.test.coef(sdata.est,
                       q = 0,           # hypothesized value under H0
                       dir = "both",    # two-sided test
                       sig.level = 0.05,
                       nh = c(0, 1))    # select beta (second coefficient)
sdata.t$results[5]
# H0 rejected, so beta is significant

# Manual replication of the (two-sided) t-test above:
sdata.N <- sdata.est$nobs         # number of observations
sdata.Suu <- sdata.est$ssr        # sum of squared residuals
sdata.Sxx <- sum((x - mean(x))^2) # variation of x
# Unbiased residual variance: Suu / (N - 2) with one regressor + intercept.
sdata.resid.var <- sdata.Suu / (sdata.N - 2)
sdata.resid.var <- sdata.est$sig.squ # ALTERNATIVE (same value, from desk)
sdata.beta.sd <- sqrt(sdata.resid.var / sdata.Sxx) # standard error of beta

# t-statistic for H0: beta = 0.
sdata.beta.t <- (sdata.est$coef[2] - 0) / sdata.beta.sd
# Two-sided test at the 5% level: the critical value is the 97.5% quantile,
# i.e. qt(1 - 0.05/2, ...). (The previous qt(1 - 0.05, ...) was the
# one-sided quantile and did not match the dir = "both" desk test above.)
sdata.beta.tcrit <- qt(p = 1 - 0.05 / 2, df = sdata.N - 2)

# Two-sided decision rule: reject H0 when |t| exceeds the critical value.
abs(sdata.beta.t) > sdata.beta.tcrit
# |t| > t.crit, H0 rejected, so beta is significant


## F-Test

# F-tests are usually done when the significance of more
# than one variable has to be tested at a time. Generally,
# it is possible to run multiple t-tests instead; however,
# it can be shown that on rare occasions (especially when
# the t- and F-values are close to their critical values)
# the two tests come to different conclusions. It is
# usually more reliable to use the F-test when testing
# multiple variables jointly.
# However, the F-test cannot handle one-sided hypotheses,
# so a t-test should be used in that case.

# For the sake of demonstration, two new exogenous variables are
# generated first, both of which influence the endogenous variable.
# Assumed model: y = alpha + (beta1 * x1) + (beta2 * x2) + u
x1 <- c(0.1, 0.2, 0.5, 0.7, 1.1)
x2 <- c(3, 11, 15, 22, 26)
sdata2.est <- ols(y ~ x1 + x2)

# Joint hypothesis test for the significance of beta1 and beta2 (F-test).
# Assumption: beta1 and beta2 are significant; the test checks the opposite.
# H0: "beta1 = beta2 = 0", significance level is 5%.

# Using the desk package:
sdata2.F <- f.test.coef(sdata2.est,
                        nh = rbind(c(0, 1, 0),  # restriction on beta1
                                   c(0, 0, 1)), # restriction on beta2
                        q = c(0, 0),            # values under H0
                        sig.level = 0.05)
sdata2.F$results[5]
# H0 rejected, so beta1 and beta2 are significant

# Manual replication of the F-test above:

# Restricted ("help") regression in which H0 is imposed:
sdata2.help.est <- ols(y ~ 1) # beta1 and beta2 have no influence

sdata2.help.est.ssr <- sdata2.help.est$ssr # this is the same as Syy
sdata2.est.ssr <- sdata2.est$ssr

# F = ((SSR_restricted - SSR_unrestricted) / q) /
#     (SSR_unrestricted / (N - k - 1)),
# with q = 2 restrictions and k = 2 regressors.
sdata2.Fval <- (((sdata2.help.est.ssr - sdata2.est.ssr) / 2) /
                  (sdata2.est.ssr / (length(y) - 2 - 1)))

sdata2.Fcrit <- qf(p = 1 - 0.05, df1 = 2, df2 = length(y) - 2 - 1)

# The F-statistic is non-negative, so the test is one-sided:
# reject H0 whenever F exceeds the critical value. (The previous
# interval check [-Fcrit; Fcrit] wrongly treated the test as
# two-sided; the lower bound -Fcrit can never be binding.)
sdata2.Fval > sdata2.Fcrit
# F > F.crit, so H0 is rejected: beta1 and beta2 are jointly significant