in dowhy/causal_refuters/add_unobserved_common_cause.py [0:0]
def include_simulated_confounder(self, convergence_threshold=0.1, c_star_max=1000):
'''
This function simulates an unobserved confounder based on the data using the following steps:
1. It calculates the "residuals" from the treatment and outcome model
i.) The outcome model has outcome as the dependent variable and all the observed variables including treatment as independent variables
ii.) The treatment model has treatment as the dependent variable and all the observed variables as independent variables.
2. U is an intermediate random variable drawn from the normal distribution with the weighted combination of the residuals as its mean and unit variance
U ~ N(c1*d_y + c2*d_t, 1)
where
*d_y and d_t are residuals from the treatment and outcome model
*c1 and c2 are coefficients to the residuals
3. The final U, which is the simulated unobserved confounder, is obtained by debiasing the intermediate variable U by residualising it with X
Choosing the coefficients c1 and c2:
The coefficients are chosen based on these basic assumptions:
1. There is a hyperbolic relationship satisfying c1*c2 = c_star
2. c_star is chosen from a range of possible values based on the correlation of the obtained simulated variable with outcome and treatment.
3. The product of the simulated variable's correlations with treatment and outcome should be as close as possible to the maximum such product over the observed confounders
4. The ratio of the coefficients c1/c2 should stay within a confidence interval around the ratio of the maximum possible observed coefficients
:param c_star_max: The maximum possible value for the hyperbolic curve on which the coefficients to the residuals lie. It defaults to 1000 if not specified by the user.
:type c_star_max: int
:param convergence_threshold: The threshold used to check whether the correlation has plateaued while selecting c_star. It defaults to 0.1 if not specified by the user.
:type convergence_threshold: float
:returns: final_U, the simulated values of the unobserved confounder based on the data
:rtype: pandas.core.series.Series
'''
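#A minimal usage sketch (hypothetical names; assumes this refuter has already been constructed with the data, the identified estimand and the estimate):
#   simulated_confounder = refuter_object.include_simulated_confounder(convergence_threshold=0.1, c_star_max=1000)
#   data['simulated_confounder'] = simulated_confounder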
#Obtaining the list of observed variables
observed_variables = self.choose_variables(required_variables=True)
observed_variables_with_treatment_and_outcome = observed_variables + self._treatment_name + self._outcome_name
#Taking a subset of the dataframe that has only observed variables
self._data = self._data[observed_variables_with_treatment_and_outcome]
#Residuals from the outcome model obtained by fitting a linear model
y = self._data[self._outcome_name[0]]
observed_variables_with_treatment = observed_variables + self._treatment_name
X = self._data[observed_variables_with_treatment]
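#Note: no constant column is added to X, so the OLS models here are fit without an intercept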
model = sm.OLS(y,X.astype('float'))
results = model.fit()
residuals_y = y - results.fittedvalues
d_y = list(pd.Series(residuals_y))
#Residuals from the treatment model obtained by fitting a linear model
t = self._data[self._treatment_name[0]].astype('int64')
X = self._data[observed_variables]
model = sm.OLS(t,X)
results = model.fit()
residuals_t = t - results.fittedvalues
d_t = list(pd.Series(residuals_t))
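#d_y and d_t are the residual vectors that form the mean of the normal draw for the intermediate confounder, i.e. U ~ N(c1*d_y + c2*d_t, 1) as described in the docstring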
#Initialising product_cor_metric_observed with a very low value since we are searching for the maximum
product_cor_metric_observed = float("-inf")
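#Find the observed variable whose correlations with outcome and treatment have the largest product; this product is used as the calibration target for the simulated confounder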
for i in observed_variables:
current_obs_confounder = self._data[i]
outcome_values = self._data[self._outcome_name[0]]
correlation_y = current_obs_confounder.corr(outcome_values)
treatment_values = t
correlation_t = current_obs_confounder.corr(treatment_values)
product_cor_metric_current = correlation_y*correlation_t
if product_cor_metric_current>=product_cor_metric_observed:
product_cor_metric_observed = product_cor_metric_current
correlation_t_observed = correlation_t
correlation_y_observed = correlation_y
#The user has the option to specify effect_strength_on_t and effect_strength_on_y (kappa_t and kappa_y), which are then used instead of the maximum correlations with treatment and outcome among the observed variables, as they specify the desired effect.
if self.kappa_t is not None:
correlation_t_observed = self.kappa_t
if self.kappa_y is not None:
correlation_y_observed = self.kappa_y
#Choosing a c_star based on the data.
#The correlation stops increasing once c_star grows beyond a certain value, i.e. it plateaus, and we choose c_star to be the value at which it plateaus.
correlation_y_list = []
correlation_t_list = []
product_cor_metric_simulated_list = []
x_list = []
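#Sweep candidate values of c_star over 10 evenly spaced points in [0, c_star_max); for each candidate, set c1 = c2 = sqrt(candidate) so that c1*c2 equals the candidate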
step = max(1, int(c_star_max/10))  #guard against a zero step when c_star_max < 10
for i in range(0, int(c_star_max), step):
c1 = math.sqrt(i)
c2 = c1
final_U = self.generate_confounder_from_residuals(c1, c2, d_y, d_t, X)
current_simulated_confounder = final_U
outcome_values = self._data[self._outcome_name[0]]
correlation_y = current_simulated_confounder.corr(outcome_values)
correlation_y_list.append(correlation_y)
treatment_values = t
correlation_t = current_simulated_confounder.corr(treatment_values)
correlation_t_list.append(correlation_t)
product_cor_metric_simulated = correlation_y*correlation_t
product_cor_metric_simulated_list.append(product_cor_metric_simulated)
x_list.append(i)
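#Detect the plateau: choose the first c_star at which the increase in correlation with the outcome falls to or below convergence_threshold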
c_star = x_list[-1]  #fall back to the largest swept value in case the correlation never plateaus
index = 1
while index<len(correlation_y_list):
if (correlation_y_list[index]-correlation_y_list[index-1])<=convergence_threshold:
c_star = x_list[index]
break
index = index+1
#Choosing c1 and c2 on the hyperbolic curve once c_star is fixed: we iterate over combinations of c1 and c2 and pick the combination that
#minimises the distance between the product of correlations of the simulated variable and the maximum product of correlations among the observed variables,
#while additionally checking that the ratio of the coefficients stays within a confidence interval around the ratio of the observed correlations
#c1_final and c2_final are initialised to the values on the hyperbolic curve such that c1_final = c2_final and c1_final*c2_final = c_star
c1_final = math.sqrt(c_star)
c2_final = math.sqrt(c_star)
#initialising min_distance_between_product_cor_metrics to a value larger than any distance we expect to see (each correlation product lies in [-1, 1])
min_distance_between_product_cor_metrics = 1.5
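#Sweep c2 geometrically (multiplying by 1.5 each step) from 0.05 up to c_star/0.05, so that c1 = c_star/c2 covers the same range in reverse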
i = 0.05
threshold = c_star/0.05
while i<=threshold:
c2 = i
c1 = c_star/c2
final_U = self.generate_confounder_from_residuals(c1, c2, d_y, d_t, X)
current_simulated_confounder = final_U
outcome_values = self._data[self._outcome_name[0]]
correlation_y = current_simulated_confounder.corr(outcome_values)
treatment_values = t
correlation_t = current_simulated_confounder.corr(treatment_values)
product_cor_metric_simulated = correlation_y*correlation_t
if min_distance_between_product_cor_metrics>=abs(product_cor_metric_simulated - product_cor_metric_observed):
min_distance_between_product_cor_metrics = abs(product_cor_metric_simulated - product_cor_metric_observed)
additional_condition = (correlation_y_observed/correlation_t_observed)
if ((c1/c2) <= (additional_condition + 0.3*additional_condition)) and ((c1/c2) >= (additional_condition - 0.3*additional_condition)): #accept only if c1/c2 is within 30% of the observed correlation ratio
c1_final = c1
c2_final = c2
i = i*1.5
'''#closed form solution
print("c_star_max before closed form", c_star_max)
if max_correlation_with_t == -1000:
c2 = 0
c1 = c_star_max
else:
additional_condition = abs(max_correlation_with_y/max_correlation_with_t)
print("additional_condition", additional_condition)
c2 = math.sqrt(c_star_max/additional_condition)
c1 = c_star_max/c2'''
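#Generate the final simulated confounder using the chosen coefficients c1_final and c2_final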
final_U = self.generate_confounder_from_residuals(c1_final, c2_final, d_y, d_t, X)
return final_U