Загружаем пакеты

library("vars")
library("ggplot2")
library("mvtsplot")
library("MSBVAR")

Пакет MSBVAR позволяет оценивать байесовские VAR с марковским переключением. Пакет bvarr реализует шесть разных предпосылок на априорное распределение и является переводом на R матлабовского кода Koop и Korobilis. Пакет bvarr пока нужно ставить с гитхаба:

# One-off installation of bvarr from GitHub (requires the devtools package);
# run interactively once, not on every execution of the script.
devtools::install_github("bdemeshev/bvarr")

Если пакет уже установлен, то подключается он стандартно:

library("bvarr")

Строим необычный график для многомерных временных рядов

# Load the Canada macroeconomic dataset shipped with the vars package
data(Canada)
# Compact "image"-style visualisation of a multivariate time series
mvtsplot(Canada)

# Standard line plots, one panel per series
plot(Canada)

# Open the dataset's documentation page
help(Canada)
# Conflict-intensity series from the MSBVAR package (see its help page)
data(IsraelPalestineConflict)
mvtsplot(IsraelPalestineConflict)

Оцениваем байесовскую VAR

# Fit a reduced-form Bayesian VAR with the Sims-Zha prior:
#   p = 6 lags; lambda0..lambda5, mu5, mu6 are the Sims-Zha prior
#   hyperparameters (overall tightness, lag decay, etc. — see ?szbvar
#   for the exact meaning of each), nu = prior degrees of freedom,
#   qm = data frequency indicator.
# posterior.fit = FALSE: skip the extra posterior-fit computations
# (see ?szbvar) — TODO confirm against the installed MSBVAR version.
fit.BVAR <- szbvar(IsraelPalestineConflict, p = 6,
                   lambda0 = 0.6, lambda1 = 0.1,
                   lambda3 = 2, lambda4 = 0.25, lambda5 = 0, mu5 = 0,
                   mu6 = 0, nu = 3, qm = 4, posterior.fit = FALSE)

# Posterior summary: prior settings, AR matrices B(1)..B(6), constants
# and the posterior error covariance (echoed below)
summary(fit.BVAR)
## ------------------------------------------
## Sims-Zha Prior reduced form Bayesian VAR
## ------------------------------------------
## Prior form :  Normal-inverse Wishart 
## Prior hyperparameters : 
## lambda0 = 0.6 
## lambda1 = 0.1 
## lambda3 = 2 
## lambda4 = 0.25 
## lambda5 = 0 
## mu5     = 0 
## mu6     = 0 
## nu      = 3 
## ------------------------------------------
## Number of observations :  1275 
## Degrees of freedom per equation :  1262 
## ------------------------------------------
## Posterior Regression Coefficients :
## ------------------------------------------
## Autoregressive matrices: 
## B(1)
##          [,1]     [,2]
## [1,] 0.614894 0.085887
## [2,] 0.244520 0.524334
## 
## B(2)
##          [,1]     [,2]
## [1,] 0.015069 0.014022
## [2,] 0.103499 0.060585
## 
## B(3)
##          [,1]     [,2]
## [1,] 0.010901 0.006883
## [2,] 0.031196 0.014888
## 
## B(4)
##          [,1]     [,2]
## [1,] 0.005543 0.001732
## [2,] 0.011299 0.004835
## 
## B(5)
##          [,1]     [,2]
## [1,] 0.001618 0.001180
## [2,] 0.003517 0.001799
## 
## B(6)
##          [,1]     [,2]
## [1,] 0.001019 0.000493
## [2,] 0.001895 0.001063
## 
## ------------------------------------------
## Constants
## -8.520156 -2.633261 
## ------------------------------------------
## ------------------------------------------
## Posterior error covariance
##          [,1]     [,2]
## [1,] 4519.496 1291.537
## [2,] 1291.537 1153.852
## 
## ------------------------------------------
# Monte Carlo draws of the impulse response functions from the posterior
# of the reduced-form BVAR: horizon of 10 steps, 5000 draws
posterior.impulses <- mc.irf(fit.BVAR, nsteps = 10, draws = 5000)
## Monte Carlo IRF Iteration = 1000
## Monte Carlo IRF Iteration = 2000
## Monte Carlo IRF Iteration = 3000
## Monte Carlo IRF Iteration = 4000
## Monte Carlo IRF Iteration = 5000
# Plot the simulated IRFs
plot(posterior.impulses) 

Оцениваем байесовскую SVAR

# BCFdata ships with MSBVAR; loading it makes Y (the endogenous series)
# and z2 (an exogenous regressor) available in the workspace
data(BCFdata)
head(Y)
## [1] -2.5805556 -2.8822581 -1.7427778 -0.3977957 -0.9771505 -0.9365130
head(z2)
## [1] 0 0 0 0 0 0
# Restriction pattern for the contemporaneous matrix A0, passed to
# szbsvar below (see ?szbsvar for the coding convention): start from
# the identity, then set the whole first row and element [2,1] to 1.
# The result is echoed as the "A0 restriction matrix" by summary(model).
m <- ncol(Y)
ident <- diag(m)
ident[1, ] <- 1
ident[2, 1] <- 1

mvtsplot(Y)

# Estimate the posterior moments of the structural Bayesian VAR
# (Sims-Zha prior): p = 5 lags, exogenous regressors z2, qm = 12.
# The identification matrix is now passed by name (ident = ident):
# the original call passed it positionally in the middle of a run of
# named arguments, relying on R's leftover-positional matching, which
# silently breaks if the function signature ever changes.
model <- szbsvar(Y, p = 5, z = z2, lambda0 = 0.8, lambda1 = 0.1,
                 lambda3 = 1, lambda4 = 0.1, lambda5 = 0.05,
                 mu5 = 0, mu6 = 5, ident = ident, qm = 12)
## Estimating starting values for the numerical optimization
## of the log posterior of A(0)
## Estimating the final values for the numerical optimization
## of the log posterior of A(0)
## initial  value 2.762930 
## final  value 2.762930 
## converged
# Summary: A0 restrictions, prior settings, reduced-form and structural
# coefficient matrices, and the posterior mode of A0 (echoed below)
summary(model)
## ------------------------------------------
## A0 restriction matrix
## ------------------------------------------
##      [,1] [,2] [,3]
## [1,]    1    1    1
## [2,]    1    1    0
## [3,]    0    0    1
## 
## ------------------------------------------
## Sims-Zha Prior Bayesian Structural VAR
## ------------------------------------------
## Prior form : Sims-Zha
## Prior hyperparameters : 
## lambda0 = 0.8 
## lambda1 = 0.1 
## lambda3 = 1 
## lambda4 = 0.1 
## lambda5 = 0.05 
## mu5     = 0 
## mu6     = 5 
## nu      = 4 
## ------------------------------------------
## Number of observations :  116 
## Degrees of freedom per equation :   
## ------------------------------------------
## Posterior Regression Coefficients :
## ------------------------------------------
## Reduced Form Autoregressive matrices: 
## B(1)
##           [,1]      [,2]      [,3]
## [1,]  0.790285 -0.019993 -0.067985
## [2,] -0.032992  0.753804 -0.042762
## [3,]  0.007008  0.011966  0.847383
## 
## B(2)
##           [,1]      [,2]      [,3]
## [1,]  0.034296  0.022477  0.020726
## [2,] -0.055705 -0.046390 -0.044275
## [3,]  0.032491  0.002655  0.077666
## 
## B(3)
##           [,1]      [,2]      [,3]
## [1,]  0.028527  0.051645 -0.003116
## [2,]  0.077929  0.088673  0.004632
## [3,] -0.028031 -0.013405  0.012637
## 
## B(4)
##           [,1]      [,2]      [,3]
## [1,]  0.009182 -0.017264 -0.027171
## [2,] -0.006181 -0.018474 -0.055399
## [3,] -0.006835 -0.009102  0.005352
## 
## B(5)
##           [,1]      [,2]      [,3]
## [1,] -0.001115  0.007283  0.006700
## [2,] -0.000493 -0.003106 -0.023519
## [3,] -0.009684  0.003604  0.043470
## 
## ------------------------------------------
## Reduced Form Constants
## -0.008907 -0.008216 0.015069 
## ------------------------------------------
## ------------------------------------------
## Reduced Form Exogenous coefficients
##               [,1]          [,2]          [,3]
##  [1,] -0.008006990 -0.0122298108 -0.0005452505
##  [2,] -0.003950520  0.0018615468 -0.0071596909
##  [3,]  0.004919295  0.0030683811 -0.0020537350
##  [4,] -0.005048346 -0.0072193561 -0.0053027417
##  [5,] -0.002997244 -0.0005684953 -0.0039403477
##  [6,] -0.001559374  0.0054264241  0.0298526956
##  [7,] -0.001806906 -0.0021317026 -0.0112648812
##  [8,] -0.045236611 -0.0180729892 -0.0073189713
##  [9,] -0.015128147 -0.0042849860  0.0026818499
## 
## ------------------------------------------
## Structural Autoregressive matrices: 
## A(1)
##          [,1]      [,2]      [,3]
## [1,] 0.544291  0.130711  0.007301
## [2,] 0.028377 -0.048106  0.023086
## [3,] 0.030318  0.072143 -0.022182
## 
## A(2)
##          [,1]      [,2]      [,3]
## [1,] 0.002853 -0.008053 -0.006598
## [2,] 0.000710 -0.000975 -0.005985
## [3,] 0.389244 -0.797928 -0.009155
## 
## A(3)
##           [,1]      [,2]     [,3]
## [1,] -0.007342  0.022185 0.012394
## [2,] -0.040312 -0.055718 0.000846
## [3,]  0.022203  0.016297 0.006262
## 
## A(4)
##           [,1]     [,2]      [,3]
## [1,] -0.008081 0.002995 -0.008257
## [2,]  0.022563 0.014877 -0.293974
## [3,] -0.007235 0.015431 -0.026985
## 
## A(5)
##           [,1]      [,2]      [,3]
## [1,]  0.001044 -0.001708 -0.004348
## [2,]  0.009414  0.019226 -0.001848
## [3,] -0.002323  0.008160 -0.015068
## 
## ------------------------------------------
## Structural Constants
## -0.007855 0.004376 -0.005216 
## ------------------------------------------
## ------------------------------------------
## Structural Exogenous coefficients
##                [,1]          [,2]          [,3]
##  [1,] -8.047992e-03  0.0089626528  0.0001995043
##  [2,] -2.361867e-03 -0.0037745498  0.0024888731
##  [3,]  4.038660e-03 -0.0008916305  0.0007061007
##  [4,] -4.974055e-03  0.0051407004  0.0018460988
##  [5,] -2.195578e-03 -0.0008073673  0.0013708195
##  [6,]  2.372668e-05 -0.0063604180 -0.0103541717
##  [7,] -1.688160e-03  0.0013704658  0.0039102318
##  [8,] -3.507164e-02 -0.0023305987  0.0025975047
##  [9,] -1.137031e-02 -0.0026055036 -0.0009108015
## 
## ------------------------------------------
## ------------------------------------------
## Posterior mode of the A0 matrix
##          [,1]      [,2]      [,3]
## [1,] 0.693882  0.466274 -0.001293
## [2,] 0.203771 -1.038128  0.000000
## [3,] 0.000000  0.000000 -0.346910
## 
## ------------------------------------------

Симулируем апостериорное распределение матрицы A0. Построение IRF для структурной модели требует на один шаг больше:

# Gibbs sampler for the posterior of the contemporaneous matrix A0:
# N1 = 1000 burn-in draws, N2 = 5000 retained draws (progress echoed)
A0.post <- gibbs.A0(model, N1 = 1000, N2 = 5000)
## Normalization Method:  DistanceMLA ( 0 )
## Gibbs Burn-in 10 % Complete
## Gibbs Burn-in 20 % Complete
## Gibbs Burn-in 30 % Complete
## Gibbs Burn-in 40 % Complete
## Gibbs Burn-in 50 % Complete
## Gibbs Burn-in 60 % Complete
## Gibbs Burn-in 70 % Complete
## Gibbs Burn-in 80 % Complete
## Gibbs Burn-in 90 % Complete
## Gibbs Burn-in 100 % Complete
## Gibbs Sampling 10 % Complete (500 draws)
## A0 log-det    = -1.248426 
## Gibbs Sampling 20 % Complete (1000 draws)
## A0 log-det    = -1.165853 
## Gibbs Sampling 30 % Complete (1500 draws)
## A0 log-det    = -1.180995 
## Gibbs Sampling 40 % Complete (2000 draws)
## A0 log-det    = 1.569468 
## Gibbs Sampling 50 % Complete (2500 draws)
## A0 log-det    = -1.160220 
## Gibbs Sampling 60 % Complete (3000 draws)
## A0 log-det    = -1.371551 
## Gibbs Sampling 70 % Complete (3500 draws)
## A0 log-det    = -1.292779 
## Gibbs Sampling 80 % Complete (4000 draws)
## A0 log-det    = -1.001444 
## Gibbs Sampling 90 % Complete (4500 draws)
## A0 log-det    = -1.458019 
## Gibbs Sampling 100 % Complete (5000 draws)
## A0 log-det    = -1.174846
# Structural IRFs: for an SVAR fit, mc.irf additionally needs the
# posterior draws of A0 produced by gibbs.A0 above
model.irfs <- mc.irf(model, nsteps = 10, draws = 5000, A0.posterior = A0.post)
## Monte Carlo IRF 10 % Complete
## Monte Carlo IRF 20 % Complete
## Monte Carlo IRF 30 % Complete
## Monte Carlo IRF 40 % Complete
## Monte Carlo IRF 50 % Complete
## Monte Carlo IRF 60 % Complete
## Monte Carlo IRF 70 % Complete
## Monte Carlo IRF 80 % Complete
## Monte Carlo IRF 90 % Complete
## Monte Carlo IRF 100 % Complete
# Plot the structural IRFs
plot(model.irfs)

Оценим одну из шести моделей из Koop и Korobilis

# One of the six priors implemented in bvarr (Koop & Korobilis code):
# the "independent" prior, estimated by MCMC (iteration progress below).
# Note: this reassigns `model`, overwriting the earlier szbsvar fit.
model <- bvar(Y, prior = "independent")
## Iteration 2000 out of 12000
## Iteration 4000 out of 12000
## Iteration 6000 out of 12000
## Iteration 8000 out of 12000
## Iteration 10000 out of 12000
## Iteration 12000 out of 12000
# Impulse response plot from the bvarr package
bvar.imp.plot(model)

Почиташки:

Я заинтересован в развитии байесовских алгоритмов для временных рядов в R. Если есть желающие писать ВКР, не боящиеся прогать на R или C и шарящие в вероятностях — добро пожаловать!