# Copyright 2022 - 2026 The PyMC Labs Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Regression kink design
"""
import warnings # noqa: I001
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from patsy import build_design_matrices, dmatrices
import xarray as xr
from causalpy.plot_utils import plot_xY
from causalpy.pymc_models import LinearRegression, PyMCModel
from causalpy.reporting import EffectSummary, _effect_summary_rkink
from causalpy.constants import HDI_PROB, LEGEND_FONT_SIZE
from .base import BaseExperiment
from typing import Any, Literal
from causalpy.utils import _is_variable_dummy_coded, round_num
from causalpy.custom_exceptions import (
DataException,
FormulaException,
)
[docs]
class RegressionKink(BaseExperiment):
    """A class to analyse regression kink designs.

    :param data:
        A pandas dataframe
    :param formula:
        A statistical model formula
    :param kink_point:
        A scalar value at which the kink occurs
    :param model:
        A PyMC model. Defaults to LinearRegression.
    :param running_variable_name:
        The name of the running variable column
    :param epsilon:
        A small scalar for evaluating the causal impact above/below the kink
    :param bandwidth:
        Data outside of the bandwidth (relative to the kink) is not used to fit
        the model.
    """

    # Regression kink designs are implemented for Bayesian models only.
    supports_ols = False
    supports_bayes = True
    # Model class used when the caller does not supply `model` explicitly.
    _default_model_class = LinearRegression
[docs]
def __init__(
self,
data: pd.DataFrame,
formula: str,
kink_point: float,
model: PyMCModel | None = None,
running_variable_name: str = "x",
epsilon: float = 0.001,
bandwidth: float = np.inf,
**kwargs: Any,
) -> None:
super().__init__(model=model)
self.expt_type = "Regression Kink"
self.data = data
self.formula = formula
self.running_variable_name = running_variable_name
self.kink_point = kink_point
self.epsilon = epsilon
self.bandwidth = bandwidth
self.input_validation()
self._build_design_matrices()
self._prepare_data()
self.algorithm()
def _build_design_matrices(self) -> None:
"""Build design matrices from formula and data, applying bandwidth filtering."""
if self.bandwidth is not np.inf:
fmin = self.kink_point - self.bandwidth
fmax = self.kink_point + self.bandwidth
filtered_data = self.data.query(f"{fmin} <= x <= {fmax}")
if len(filtered_data) <= 10:
warnings.warn(
f"Choice of bandwidth parameter has lead to only {len(filtered_data)} remaining datapoints. Consider increasing the bandwidth parameter.", # noqa: E501
UserWarning,
stacklevel=2,
)
y, X = dmatrices(self.formula, filtered_data)
else:
y, X = dmatrices(self.formula, self.data)
self._y_design_info = y.design_info
self._x_design_info = X.design_info
self.labels = X.design_info.column_names
self.y, self.X = np.asarray(y), np.asarray(X)
self.outcome_variable_name = y.design_info.column_names[0]
def _prepare_data(self) -> None:
"""Convert design matrices to xarray DataArrays."""
self.X = xr.DataArray(
self.X,
dims=["obs_ind", "coeffs"],
coords={
"obs_ind": np.arange(self.X.shape[0]),
"coeffs": self.labels,
},
)
self.y = xr.DataArray(
self.y,
dims=["obs_ind", "treated_units"],
coords={"obs_ind": np.arange(self.y.shape[0]), "treated_units": ["unit_0"]},
)
[docs]
def algorithm(self) -> None:
"""Run the experiment algorithm: fit model, predict, and evaluate gradient change."""
COORDS = {
"coeffs": self.labels,
"obs_ind": np.arange(self.X.shape[0]),
"treated_units": ["unit_0"],
}
self.model.fit(X=self.X, y=self.y, coords=COORDS)
# score the goodness of fit to all data
self.score = self.model.score(X=self.X, y=self.y)
# get the model predictions of the observed data
if self.bandwidth is not np.inf:
fmin = self.kink_point - self.bandwidth
fmax = self.kink_point + self.bandwidth
xi = np.linspace(fmin, fmax, 200)
else:
xi = np.linspace(
np.min(self.data[self.running_variable_name]),
np.max(self.data[self.running_variable_name]),
200,
)
self.x_pred = pd.DataFrame(
{self.running_variable_name: xi, "treated": self._is_treated(xi)}
)
(new_x,) = build_design_matrices([self._x_design_info], self.x_pred)
self.pred = self.model.predict(X=np.asarray(new_x))
# evaluate gradient change around kink point
mu_kink_left, mu_kink, mu_kink_right = self._probe_kink_point()
self.gradient_change = self._eval_gradient_change(
mu_kink_left, mu_kink, mu_kink_right, self.epsilon
)
@staticmethod
def _eval_gradient_change(
mu_kink_left: xr.DataArray,
mu_kink: xr.DataArray,
mu_kink_right: xr.DataArray,
epsilon: float,
) -> xr.DataArray:
"""Evaluate the gradient change at the kink point.
It works by evaluating the model below the kink point, at the kink point,
and above the kink point.
This is a static method for ease of testing.
"""
gradient_left = (mu_kink - mu_kink_left) / epsilon
gradient_right = (mu_kink_right - mu_kink) / epsilon
gradient_change = gradient_right - gradient_left
return gradient_change
def _probe_kink_point(self) -> tuple[xr.DataArray, xr.DataArray, xr.DataArray]:
"""Probe the kink point to evaluate the predicted outcome at the kink point and
either side."""
# Create a dataframe to evaluate predicted outcome at the kink point and either
# side
x_predict = pd.DataFrame(
{
self.running_variable_name: np.array(
[
self.kink_point - self.epsilon,
self.kink_point,
self.kink_point + self.epsilon,
]
),
"treated": np.array([0, 1, 1]),
}
)
(new_x,) = build_design_matrices([self._x_design_info], x_predict)
predicted = self.model.predict(X=np.asarray(new_x))
# extract predicted mu values
mu_kink_left = predicted["posterior_predictive"].sel(obs_ind=0)["mu"]
mu_kink = predicted["posterior_predictive"].sel(obs_ind=1)["mu"]
mu_kink_right = predicted["posterior_predictive"].sel(obs_ind=2)["mu"]
return mu_kink_left, mu_kink, mu_kink_right
def _is_treated(self, x: np.ndarray | pd.Series) -> np.ndarray:
"""Returns ``True`` if `x` is greater than or equal to the treatment threshold.""" # noqa: E501
return np.greater_equal(x, self.kink_point)
[docs]
    def summary(self, round_to: int | None = 2) -> None:
        """Print summary of main results and model coefficients.

        :param round_to:
            Number of decimals used to round results. Defaults to 2. Use "None" to return raw numbers
        """
        # Header block: experiment type, formula, kink location, and the
        # posterior-mean change in slope at the kink point.
        print(
            f"""
{self.expt_type:=^80}
Formula: {self.formula}
Running variable: {self.running_variable_name}
Kink point on running variable: {self.kink_point}
Results:
Change in slope at kink point = {round_num(self.gradient_change.mean(), round_to)}
"""
        )
        # Coefficient table is produced by the base experiment class.
        self.print_coefficients(round_to)
[docs]
def plot(
self,
*,
round_to: int | None = 2,
hdi_prob: float = HDI_PROB,
figsize: tuple[float, float] | None = None,
show: bool = True,
legend_kwargs: dict[str, Any] | None = None,
) -> tuple[plt.Figure, plt.Axes]:
"""Plot the regression kink results.
Parameters
----------
round_to : int, optional
Number of decimals used to round numerical results in the figure
title (e.g. the Bayesian :math:`R^2`). Defaults to 2. Use
``None`` to render raw numbers.
hdi_prob : float
Probability mass of the highest density interval drawn around the
posterior predictive band, and the central credible interval
reported in the figure title for the change in gradient at the
kink point. Must be in ``(0, 1]``. Defaults to
:data:`~causalpy.constants.HDI_PROB` (currently 0.94).
figsize : tuple of (float, float), optional
Width and height of the figure in inches, passed to
:func:`matplotlib.pyplot.subplots`. Defaults to ``None`` (use
matplotlib's default).
show : bool
Whether to automatically display the plot. Defaults to ``True``.
legend_kwargs : dict, optional
Keyword arguments to adjust legend placement and styling.
Supported keys: ``loc``, ``bbox_to_anchor``, ``fontsize``,
``frameon``, ``title`` (``bbox_transform`` is accepted alongside
``bbox_to_anchor``). The existing legend is modified **in
place** so that custom handles are preserved.
Returns
-------
fig : matplotlib.figure.Figure
The figure that was created.
ax : matplotlib.axes.Axes
The axes object containing the plot.
"""
return self._render_plot(
show=show,
legend_kwargs=legend_kwargs,
round_to=round_to,
hdi_prob=hdi_prob,
figsize=figsize,
)
def _bayesian_plot(
self,
round_to: int | None = 2,
hdi_prob: float = HDI_PROB,
figsize: tuple[float, float] | None = None,
**kwargs: Any,
) -> tuple[plt.Figure, plt.Axes]:
"""Generate plot for regression kink designs.
Parameters
----------
round_to : int, optional
Number of decimals used to round results. Defaults to 2. Use ``None``
to return raw numbers.
hdi_prob : float, optional
Probability mass of the highest density interval drawn around the
posterior predictive band, and the central credible interval
reported in the figure title for the change in gradient at the
kink point. Must be in ``(0, 1]``. Defaults to
:data:`~causalpy.constants.HDI_PROB` (currently 0.94).
figsize : tuple of (float, float), optional
Width and height of the figure in inches. Defaults to ``None``
(use matplotlib's default).
"""
fig, ax = plt.subplots(figsize=figsize)
# Plot raw data
sns.scatterplot(
self.data,
x=self.running_variable_name,
y=self.outcome_variable_name,
c="k", # hue="treated",
ax=ax,
)
# Plot model fit to data
h_line, h_patch = plot_xY(
self.x_pred[self.running_variable_name],
self.pred["posterior_predictive"].mu.isel(treated_units=0),
ax=ax,
hdi_prob=hdi_prob,
plot_hdi_kwargs={"color": "C1"},
)
handles = [(h_line, h_patch)]
labels = ["Posterior mean"]
# create strings to compose title
title_info = f"{round_num(self.score['unit_0_r2'], round_to if round_to is not None else 2)} (std = {round_num(self.score['unit_0_r2_std'], round_to if round_to is not None else 2)})"
r2 = f"Bayesian $R^2$ on all data = {title_info}"
percentiles = self.gradient_change.quantile(
[(1 - hdi_prob) / 2, 1 - (1 - hdi_prob) / 2]
).values
ci = (
rf"$CI_{{{hdi_prob * 100:.0f}\%}}$"
+ f"[{round_num(percentiles[0], round_to if round_to is not None else 2)}, {round_num(percentiles[1], round_to if round_to is not None else 2)}]"
)
grad_change = f"""
Change in gradient = {round_num(self.gradient_change.mean(), round_to if round_to is not None else 2)},
"""
ax.set(title=r2 + "\n" + grad_change + ci)
# Intervention line
ax.axvline(
x=self.kink_point,
ls="-",
lw=3,
color="r",
label="treatment threshold",
)
ax.legend(
handles=(h_tuple for h_tuple in handles),
labels=labels,
fontsize=LEGEND_FONT_SIZE,
)
return fig, ax
[docs]
def effect_summary(
self,
*,
direction: Literal["increase", "decrease", "two-sided"] = "increase",
alpha: float = 0.05,
min_effect: float | None = None,
**kwargs: Any,
) -> EffectSummary:
"""
Generate a decision-ready summary of causal effects for Regression Kink.
Parameters
----------
direction : {"increase", "decrease", "two-sided"}, default="increase"
Direction for tail probability calculation (PyMC only, ignored for OLS).
alpha : float, default=0.05
Significance level for HDI/CI intervals (1-alpha confidence level).
min_effect : float, optional
Region of Practical Equivalence (ROPE) threshold (PyMC only, ignored for OLS).
Returns
-------
EffectSummary
Object with .table (DataFrame) and .text (str) attributes
"""
return _effect_summary_rkink(
self,
direction=direction,
alpha=alpha,
min_effect=min_effect,
)