in econml/inference/_bootstrap.py
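# Note: `np`, `Parallel`, and `norm` below are assumed to come from this module's imports
# (numpy, joblib's Parallel, and scipy.stats.norm, respectively).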
def __getattr__(self, name):
    """
    Get a proxy attribute that wraps the corresponding attribute with the same name from the wrapped object.

    Additionally, the suffixes "_interval", "_std", and "_inference" are supported for getting a
    confidence interval, a bootstrap standard deviation, or an inference results object instead of
    a point estimate.
    """
    # don't proxy special methods
    if name.startswith('__'):
        raise AttributeError(name)
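
    # `proxy` is the common machinery for all of the suffixes handled below: it evaluates the
    # requested attribute on every bootstrap instance in parallel (joblib's Parallel accepts an
    # iterable of (function, args, kwargs) task tuples), stacks those results into an array, pairs
    # that array with the same attribute evaluated on the wrapped (full-data) estimator, and hands
    # both to the `summary` callable.  When `make_call` is True the attribute is a method, so a
    # wrapper is returned that forwards *args/**kwargs before summarizing.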
    def proxy(make_call, name, summary):
        def summarize_with(f):
            results = np.array(Parallel(n_jobs=self._n_jobs, prefer='threads', verbose=self._verbose)(
                (f, (obj, name), {}) for obj in self._instances)), f(self._wrapped, name)
            return summary(*results)

        if make_call:
            def call(*args, **kwargs):
                return summarize_with(lambda obj, name: getattr(obj, name)(*args, **kwargs))
            return call
        else:
            return summarize_with(lambda obj, name: getattr(obj, name))
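
    # Each of the helpers below plugs a different `summary` function into `proxy` to implement one
    # of the supported attribute forms (plain name, "_std", "_interval", "_inference").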
    def get_mean():
        # for attributes that exist on the wrapped object, just compute the mean of the wrapped calls
        return proxy(callable(getattr(self._instances[0], name)), name, lambda arr, _: np.mean(arr, axis=0))
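
    # "<name>_std" reports the standard deviation of the bootstrap replicates of "<name>".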
    def get_std():
        prefix = name[: - len('_std')]
        return proxy(callable(getattr(self._instances[0], prefix)), prefix,
                     lambda arr, _: np.std(arr, axis=0))
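
    # "<name>_interval" returns a (lower bound, upper bound) pair; the bounds are requested as
    # percentiles via the `lower` and `upper` keyword arguments, which default to 5 and 95.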
    def get_interval():
        # if the attribute exists on the wrapped object once we remove the suffix,
        # then we should be computing a confidence interval for the wrapped calls
        prefix = name[: - len("_interval")]
        def call_with_bounds(can_call, lower, upper):
            def percentile_bootstrap(arr, _):
                return np.percentile(arr, lower, axis=0), np.percentile(arr, upper, axis=0)

            def pivot_bootstrap(arr, est):
                return 2 * est - np.percentile(arr, upper, axis=0), 2 * est - np.percentile(arr, lower, axis=0)

            def normal_bootstrap(arr, est):
                std = np.std(arr, axis=0)
                return est - norm.ppf(upper / 100) * std, est - norm.ppf(lower / 100) * std

            # TODO: studentized bootstrap? this would be more accurate in most cases but can we avoid
            #       second level bootstrap which would be prohibitive computationally?
            fn = {'percentile': percentile_bootstrap,
                  'normal': normal_bootstrap,
                  'pivot': pivot_bootstrap}[self._bootstrap_type]
            return proxy(can_call, prefix, fn)
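
        # Toy illustration (made-up numbers, not from the codebase): if the 5th/95th percentiles of
        # the bootstrap replicates are 0.8/1.6 and the full-data estimate is 1.0, then 'percentile'
        # gives (0.8, 1.6), while 'pivot' reflects around the estimate and gives
        # (2 * 1.0 - 1.6, 2 * 1.0 - 0.8) = (0.4, 1.2).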
        can_call = callable(getattr(self._instances[0], prefix))
        if can_call:
            # collect extra arguments and pass them through, if the wrapped attribute was callable
            def call(*args, lower=5, upper=95, **kwargs):
                return call_with_bounds(can_call, lower, upper)(*args, **kwargs)
            return call
        else:
            # don't pass extra arguments if the wrapped attribute wasn't callable to begin with
            def call(lower=5, upper=95):
                return call_with_bounds(can_call, lower, upper)
            return call
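
    # "<name>_inference" packages the bootstrap replicates into a full inference results object:
    # EmpiricalInferenceResults (carrying the whole bootstrap distribution) for the 'percentile'
    # and 'pivot' bootstrap types, or NormalInferenceResults (point estimate plus bootstrap
    # standard error) for the 'normal' type.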
    def get_inference():
        # can't import from econml.inference at top level without creating cyclical dependencies
        from ._inference import EmpiricalInferenceResults, NormalInferenceResults
        from .._cate_estimator import LinearModelFinalCateEstimatorDiscreteMixin

        prefix = name[: - len("_inference")]

        def fname_transformer(x):
            return x

        if prefix in ['const_marginal_effect', 'marginal_effect', 'effect']:
            inf_type = 'effect'
        elif prefix == 'coef_':
            inf_type = 'coefficient'
            if (hasattr(self._instances[0], 'cate_feature_names') and
                    callable(self._instances[0].cate_feature_names)):
                def fname_transformer(x):
                    return self._instances[0].cate_feature_names(x)
        elif prefix == 'intercept_':
            inf_type = 'intercept'
        else:
            raise AttributeError("Unsupported inference: " + name)
        d_t = self._wrapped._d_t[0] if self._wrapped._d_t else 1
        if prefix == 'effect' or (isinstance(self._wrapped, LinearModelFinalCateEstimatorDiscreteMixin) and
                                  (inf_type == 'coefficient' or inf_type == 'intercept')):
            d_t = None
        d_y = self._wrapped._d_y[0] if self._wrapped._d_y else 1

        can_call = callable(getattr(self._instances[0], prefix))

        kind = self._bootstrap_type
        if kind == 'percentile' or kind == 'pivot':
            def get_dist(est, arr):
                if kind == 'percentile':
                    return arr
                elif kind == 'pivot':
                    return 2 * est - arr
                else:
                    raise ValueError("Invalid kind, must be either 'percentile' or 'pivot'")
            def get_result():
                return proxy(can_call, prefix,
                             lambda arr, est: EmpiricalInferenceResults(
                                 d_t=d_t, d_y=d_y,
                                 pred=est, pred_dist=get_dist(est, arr),
                                 inf_type=inf_type,
                                 fname_transformer=fname_transformer,
                                 feature_names=self._wrapped.cate_feature_names(),
                                 output_names=self._wrapped.cate_output_names(),
                                 treatment_names=self._wrapped.cate_treatment_names()
                             ))

            # Note that inference results are always methods even if the inference is for a property
            # (e.g. coef__inference() is a method but coef_ is a property)
            # Therefore we must insert a lambda if getting inference for a non-callable
            return get_result() if can_call else get_result
        else:
            assert kind == 'normal'

            def normal_inference(*args, **kwargs):
                pred = getattr(self._wrapped, prefix)
                if can_call:
                    pred = pred(*args, **kwargs)
                stderr = getattr(self, prefix + '_std')
                if can_call:
                    stderr = stderr(*args, **kwargs)
                return NormalInferenceResults(
                    d_t=d_t, d_y=d_y, pred=pred,
                    pred_stderr=stderr, mean_pred_stderr=None, inf_type=inf_type,
                    fname_transformer=fname_transformer,
                    feature_names=self._wrapped.cate_feature_names(),
                    output_names=self._wrapped.cate_output_names(),
                    treatment_names=self._wrapped.cate_treatment_names())

            # If inference is for a property, create a fresh lambda to avoid passing args through
            return normal_inference if can_call else lambda: normal_inference()
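
    # Dispatch on the suffix of the requested attribute.  Suffix handling is attempted first, even
    # when the wrapped object happens to expose an attribute with that exact name; if there is no
    # recognized suffix, or suffix handling raises AttributeError, fall back to the bootstrap mean
    # of the attribute when mean computation is enabled, and otherwise re-raise.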
    caught = None
    m = None
    if name.endswith("_interval"):
        m = get_interval
    elif name.endswith("_std"):
        m = get_std
    elif name.endswith("_inference"):
        m = get_inference

    # try to get interval/std first if appropriate,
    # since we don't prefer a wrapped method with this name
    if m is not None:
        try:
            return m()
        except AttributeError as err:
            caught = err
    if self._compute_means:
        return get_mean()
    raise (caught if caught else AttributeError(name))
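
# A minimal usage sketch of how this dispatch plays out (hypothetical variable names; the
# constructor and fit arguments shown are assumptions and may differ from the current API):
#
#   est = BootstrapEstimator(LinearDML(), n_bootstrap_samples=100, bootstrap_type='pivot')
#   est.fit(y, T, X=X, W=W)
#   est.effect(X_test)                                   # resolves via get_mean()
#   est.effect_interval(X_test, lower=2.5, upper=97.5)   # resolves via get_interval()
#   est.effect_inference(X_test)                         # resolves via get_inference()
#   est.coef__interval()                                 # also works for non-callable attributes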