def compute_sequential_adjusted_alpha()

in spotify_confidence/analysis/frequentist/confidence_computers/z_test_computer.py


def compute_sequential_adjusted_alpha(df: DataFrame, **kwargs: Dict[str, str]):
    """Compute per-row adjusted alpha levels for sequential (repeated-look) z-tests.

    The nominal alpha is first Bonferroni-corrected for the number of comparisons,
    then spent across the ordinal steps via `sequential_bounds`, using the fraction
    of the final expected sample size observed at each step. The resulting z
    boundaries are converted back into one- or two-sided alpha levels.
    """
    denominator = kwargs[DENOMINATOR]
    final_expected_sample_size_column = kwargs[FINAL_EXPECTED_SAMPLE_SIZE]
    ordinal_group_column = kwargs[ORDINAL_GROUP_COLUMN]
    n_comparisons = kwargs[NUMBER_OF_COMPARISONS]

    # Each ordinal value (e.g. an analysis date) must map to exactly one row.
    if not df.reset_index()[ordinal_group_column].is_unique:
        raise ValueError("Ordinal values cannot be duplicated")

    # Convert the sequential z boundaries returned by `sequential_bounds` into
    # per-row adjusted alpha levels (two-sided or one-sided tail probabilities).
    def adjusted_alphas_for_group(grp: DataFrame) -> Series:
        return (
            sequential_bounds(
                t=grp["sample_size_proportions"].values,
                alpha=grp[ALPHA].values[0] / n_comparisons,
                sides=2 if (grp[PREFERENCE_TEST] == TWO_SIDED).all() else 1,
            )
            .df.set_index(grp.index)
            .assign(
                **{
                    ADJUSTED_ALPHA: lambda df: df.apply(
                        lambda row: (
                            2 * (1 - st.norm.cdf(row["zb"]))
                            if (grp[PREFERENCE_TEST] == TWO_SIDED).all()
                            else 1 - st.norm.cdf(row["zb"])
                        ),
                        axis=1,
                    )
                }
            )
        )[["zb", ADJUSTED_ALPHA]]

    comparison_total_column = "comparison_total_" + denominator
    return Series(
        data=(
            # Total units across both groups of the comparison at each ordinal step.
            df.assign(**{comparison_total_column: df[denominator + SFX1] + df[denominator + SFX2]})
            # Overall maximum of the observed totals and the final expected sample size;
            # using it as the denominator keeps the proportions at or below 1.
            .assign(
                max_sample_size=lambda df: df[[comparison_total_column, final_expected_sample_size_column]]
                .max(axis=1)
                .max()
            )
            # Information fraction observed at each step.
            .assign(sample_size_proportions=lambda df: df[comparison_total_column] / df["max_sample_size"])
            .pipe(adjusted_alphas_for_group)[ADJUSTED_ALPHA]
        ),
        name=ADJUSTED_ALPHA,
    )
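
For reference, a minimal calling sketch. This helper is normally invoked internally by the z-test computer, so everything below is illustrative: the column names ("users", "final_n"), the weekly index, and the assumption that the constants can be imported from spotify_confidence.analysis.constants are not taken from this file.

import pandas as pd

# Assumption: these constants live in spotify_confidence.analysis.constants.
from spotify_confidence.analysis.constants import (
    ALPHA,
    DENOMINATOR,
    FINAL_EXPECTED_SAMPLE_SIZE,
    NUMBER_OF_COMPARISONS,
    ORDINAL_GROUP_COLUMN,
    PREFERENCE_TEST,
    SFX1,
    SFX2,
    TWO_SIDED,
)
from spotify_confidence.analysis.frequentist.confidence_computers.z_test_computer import (
    compute_sequential_adjusted_alpha,
)

# One row per ordinal step (here: week) for a single comparison; the column
# names "users" and "final_n" are made up for this example.
df = pd.DataFrame(
    {
        "users" + SFX1: [1000, 2000, 3000],
        "users" + SFX2: [1000, 2000, 3000],
        ALPHA: [0.05, 0.05, 0.05],
        PREFERENCE_TEST: [TWO_SIDED] * 3,
        "final_n": [8000, 8000, 8000],
    },
    index=pd.Index([1, 2, 3], name="week"),
)

adjusted_alpha = compute_sequential_adjusted_alpha(
    df,
    **{
        DENOMINATOR: "users",
        FINAL_EXPECTED_SAMPLE_SIZE: "final_n",
        ORDINAL_GROUP_COLUMN: "week",
        NUMBER_OF_COMPARISONS: 1,
    },
)
print(adjusted_alpha)  # one adjusted alpha per week

The returned Series carries one adjusted alpha per ordinal step, indexed like the input frame; because the maximum is taken over both the observed comparison totals and the expected final sample size, the sample size proportions stay at or below 1 even if more traffic arrives than planned.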