# def _ci_width()
#
# in spotify_confidence/analysis/frequentist/confidence_computers/sample_size_computer.py [0:0]

def _ci_width(df: DataFrame, **kwargs: Dict) -> DataFrame:
    """Return *df* with a ``CI_WIDTH`` column: the widest confidence interval
    across all treatment groups at the expected final sample size.

    The expected sample size is split between control and treatments according
    to ``kwargs[TREATMENT_WEIGHTS]`` (first weight = control, rest = treatments).
    If no expected sample size is available the width is ``None``; if any group
    would round down to zero observations the width is ``inf``.

    Note: ``df`` is assumed to hold a single comparison, hence the
    ``.values[0]`` scalar reads — TODO confirm against the groupby caller.
    """
    expected_sample_size = (
        None if kwargs[FINAL_EXPECTED_SAMPLE_SIZE] is None else df[kwargs[FINAL_EXPECTED_SAMPLE_SIZE]].values[0]
    )
    # Without an expected final sample size the CI width is undefined.
    if expected_sample_size is None or np.isnan(expected_sample_size):
        return df.assign(**{CI_WIDTH: None})

    all_weights = kwargs[TREATMENT_WEIGHTS]
    control_weight, treatment_weights = all_weights[0], all_weights[1:]
    sum_of_weights = sum(all_weights)

    # A zero-sized control group makes every interval infinitely wide.
    control_count = int((control_weight / sum_of_weights) * expected_sample_size)
    if control_count == 0:
        return df.assign(**{CI_WIDTH: float("inf")})

    binary = df[kwargs[IS_BINARY]].values[0]
    # Two-sided tests split alpha across both tails, one-sided tests do not.
    z_alpha = st.norm.ppf(
        1
        - df[ADJUSTED_ALPHA_POWER_SAMPLE_SIZE].values[0] / (2 if df[PREFERENCE_TEST].values[0] == TWO_SIDED else 1)
    )

    non_inferiority = is_non_inferiority(df[NIM].values[0])
    max_ci_width = 0
    for treatment_weight in treatment_weights:
        treatment_count = int((treatment_weight / sum_of_weights) * expected_sample_size)
        # Any empty treatment group likewise yields an infinite width.
        if treatment_count == 0:
            return df.assign(**{CI_WIDTH: float("inf")})
        comparison_ci_width = confidence_computers[ZTEST].ci_width(
            z_alpha=z_alpha,
            binary=binary,
            non_inferiority=non_inferiority,
            hypothetical_effect=df[ALTERNATIVE_HYPOTHESIS] - df[NULL_HYPOTHESIS],
            control_avg=df[POINT_ESTIMATE],
            control_var=df[VARIANCE],
            control_count=control_count,
            treatment_count=treatment_count,
        )
        # Keep the widest interval seen across all control-vs-treatment pairs.
        max_ci_width = max(comparison_ci_width.max(), max_ci_width)

    # Use assign (consistent with the branches above) instead of mutating the
    # caller's DataFrame in place; max_ci_width == 0 means no treatments.
    return df.assign(**{CI_WIDTH: None if max_ci_width == 0 else max_ci_width})