ordinary least squares
[results]=ols2a(y,x,prescte,results)
y = dependent variable vector (nobs x 1)
x = independent variables matrix (nobs x nvar)
prescte = a boolean (%t or %f) indicating whether the model contains a constant term or not
results = a tlist containing all the necessary fields, with the fields 'meth', 'nobs', 'y', 'namey', 'prests' and 'ym' already filled
results = a tlist with the following fields (the standard OLS formulas behind the main statistical fields are sketched just after this list)
. results('meth') = 'ols'
. results('y') = y data vector
. results('x') = x data matrix
. results('nobs') = nobs
. results('nvar') = nvar
. results('beta') = bhat
. results('yhat') = yhat
. results('resid') = residuals
. results('vcovar') = estimated variance-covariance matrix of beta
. results('sige') = estimated variance of the residuals
. results('ser') = standard error of the regression
. results('tstat') = t-stats
. results('pvalue') = pvalue of the betas
. results('dw') = Durbin-Watson Statistic
. results('condindex') = multicollinearity condition index
. results('prescte') = boolean indicating the presence or absence of a constant in the regression
. results('rsqr') = rsquared
. results('rbar') = rbar-squared
. results('f') = F-statistic testing the joint nullity of all coefficients other than the constant
. results('pvaluef') = the significance level (p-value) of this F-statistic
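The main statistical fields above follow the standard OLS formulas. As a purely illustrative sketch (this is not the code used inside ols2a, and the nobs-nvar degrees-of-freedom correction is an assumption), they can be reproduced in Scilab as:

nobs=size(x,1); nvar=size(x,2);
bhat=inv(x'*x)*x'*y              // results('beta')
yhat=x*bhat                      // results('yhat')
resid=y-yhat                     // results('resid')
sige=resid'*resid/(nobs-nvar)    // results('sige')
vcovar=sige*inv(x'*x)            // results('vcovar')
ser=sqrt(sige)                   // results('ser')
tstat=bhat./sqrt(diag(vcovar))   // results('tstat')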
load(GROCERDIR+'data/bdhenderic.dat');
[y,namey,x,namex,prests,b]=explouniv('lm1',['ly','lp','rnet','const'])
r1=ols2(y,x)
r1(1)($+1)='aic'
r1(1)($+1)='bic'
r1(1)($+1)='hq'
r2=ols2a(y,x(:,1:4),%f,r1)   // perform the same regression, but in a rather tortuous way;
                             // this way of performing an ols regression is indeed used in automatic for speed reasons
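Once ols2a has been run, individual results are read back from the returned tlist with the same indexing syntax used above; a minimal sketch, assuming r2 is the tlist built in the example:

bcoefs=r2('beta')      // estimated coefficients
tstats=r2('tstat')     // their t-statistics
rbar2=r2('rbar')       // adjusted R-squared
disp(r2('dw'))         // Durbin-Watson statistic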