ordinary least squares
[results]=ols1a(y,x,results)
y = dependent variable vector (nobs x 1)
x = independent variables matrix (nobs x nvar)
results = an existing tlist of regression results (input), which is updated and returned as
a tlist with:
- results('meth') = 'ols'
- results('beta') = bhat
- results('tstat') = t-stats
- results('yhat') = yhat
- results('resid') = residuals
- results('sige') = e'*e/(n-k)
- results('rsqr') = rsquared
- results('rbar') = rbar-squared
- results('dw') = Durbin-Watson Statistic
- results('nobs') = nobs
- results('nvar') = nvars
- results('y') = y data vector
- results('x') = x data matrix
load(GROCERDIR+'data/bdhenderic.dat');
[y,namey,x,namex,prests,b]=explouniv('lm1',['ly','lp','rnet','const'])
name_test=['test' ;...
'Chow pred. fail. (50%)';...
'Chow pred. fail. (90%)' ; ...
'Doornik & Hansen' ;'AR(1-4)' ; 'hetero x_squared']
[lrmod]=def_results(y,namey,size(y,1),prests,%t,4,b,name_test,%f,1:size(y,1))
r=ols1a(y,x,lrmod(1))
// this is a rather tortuous way to perform an ols regression; it is indeed used in automatic for speed reasons