diff --git a/CLAUDE.md b/CLAUDE.md index 5b6d3709..79fb7af3 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -259,6 +259,10 @@ When writing new public-facing code, always accept and return `period`. Convert ## Testing -- pytest with markers: `wip`, `unit`, `integration`, `end_to_end` +- pytest with markers: `wip`, `unit`, `integration`, `end_to_end`, `long_running` - Test files mirror source structure in `tests/` - Memory profiling available via pytest-memray (Unix only) +- MATLAB AF CES / translog reproduction tests live in the parent workspace at + `../matlab_ces_repro/` (alongside `sim_repro/`), not in this library. They depend on + reference data at `/home/hmg/sciebo/Skill estimation/` and the CNLSY xls bundled + beside them. Run from the workspace root. diff --git a/docs/getting_started/tutorial.ipynb b/docs/getting_started/tutorial.ipynb index b123b437..954e46e6 100644 --- a/docs/getting_started/tutorial.ipynb +++ b/docs/getting_started/tutorial.ipynb @@ -22,7 +22,7 @@ "import pandas as pd\n", "\n", "from skillmodels import get_maximization_inputs\n", - "from skillmodels.config import TEST_DATA_DIR\n", + "from skillmodels.common.config import TEST_DATA_DIR\n", "from skillmodels.test_data.model2 import MODEL2" ] }, diff --git a/docs/how_to_guides/how_to_simulate_dataset.ipynb b/docs/how_to_guides/how_to_simulate_dataset.ipynb index e15340fa..47dfe2ab 100644 --- a/docs/how_to_guides/how_to_simulate_dataset.ipynb +++ b/docs/how_to_guides/how_to_simulate_dataset.ipynb @@ -8,8 +8,8 @@ "source": [ "import pandas as pd\n", "\n", - "from skillmodels.config import REGRESSION_VAULT, TEST_DATA_DIR\n", - "from skillmodels.simulate_data import simulate_dataset\n", + "from skillmodels.common.config import REGRESSION_VAULT, TEST_DATA_DIR\n", + "from skillmodels.common.simulate_data import simulate_dataset\n", "from skillmodels.test_data.model2 import MODEL2" ] }, diff --git a/docs/how_to_guides/how_to_visualize_correlations.ipynb b/docs/how_to_guides/how_to_visualize_correlations.ipynb index 57e2c484..62efd7b5 100644 --- a/docs/how_to_guides/how_to_visualize_correlations.ipynb +++ b/docs/how_to_guides/how_to_visualize_correlations.ipynb @@ -15,8 +15,8 @@ "source": [ "import pandas as pd\n", "\n", - "from skillmodels.config import REGRESSION_VAULT, TEST_DATA_DIR\n", - "from skillmodels.correlation_heatmap import (\n", + "from skillmodels.common.config import REGRESSION_VAULT, TEST_DATA_DIR\n", + "from skillmodels.common.correlation_heatmap import (\n", " get_measurements_corr,\n", " get_quasi_scores_corr,\n", " get_scores_corr,\n", @@ -167,7 +167,7 @@ "metadata": {}, "outputs": [], "source": [ - "from skillmodels.visualize_transition_equations import (\n", + "from skillmodels.common.visualize_transition_equations import (\n", " _get_parsed_params,\n", " _set_index_params,\n", ")" @@ -179,7 +179,7 @@ "metadata": {}, "outputs": [], "source": [ - "from skillmodels.process_model import process_model" + "from skillmodels.common.process_model import process_model" ] }, { diff --git a/docs/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb b/docs/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb index 73831c95..9e374ab2 100644 --- a/docs/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb +++ b/docs/how_to_guides/how_to_visualize_pairwise_factor_distribution.ipynb @@ -10,16 +10,16 @@ "import numpy as np\n", "import pandas as pd\n", "\n", - "from skillmodels.config import REGRESSION_VAULT, TEST_DATA_DIR\n", - "from skillmodels.maximization_inputs import 
get_maximization_inputs\n", - "from skillmodels.simulate_data import simulate_dataset\n", - "from skillmodels.test_data.model2 import MODEL2\n", - "from skillmodels.visualize_factor_distributions import (\n", + "from skillmodels.chs.maximization_inputs import get_maximization_inputs\n", + "from skillmodels.common.config import REGRESSION_VAULT, TEST_DATA_DIR\n", + "from skillmodels.common.simulate_data import simulate_dataset\n", + "from skillmodels.common.visualize_factor_distributions import (\n", " bivariate_density_contours,\n", " bivariate_density_surfaces,\n", " combine_distribution_plots,\n", " univariate_densities,\n", - ")" + ")\n", + "from skillmodels.test_data.model2 import MODEL2" ] }, { diff --git a/docs/how_to_guides/how_to_visualize_transition_equations.ipynb b/docs/how_to_guides/how_to_visualize_transition_equations.ipynb index d4581df2..bcdcbbad 100644 --- a/docs/how_to_guides/how_to_visualize_transition_equations.ipynb +++ b/docs/how_to_guides/how_to_visualize_transition_equations.ipynb @@ -9,12 +9,12 @@ "source": [ "import pandas as pd\n", "\n", - "from skillmodels.config import REGRESSION_VAULT, TEST_DATA_DIR\n", - "from skillmodels.test_data.model2 import MODEL2\n", - "from skillmodels.visualize_transition_equations import (\n", + "from skillmodels.common.config import REGRESSION_VAULT, TEST_DATA_DIR\n", + "from skillmodels.common.visualize_transition_equations import (\n", " combine_transition_plots,\n", " get_transition_plots,\n", - ")" + ")\n", + "from skillmodels.test_data.model2 import MODEL2" ] }, { diff --git a/docs/how_to_guides/model_specs.md b/docs/how_to_guides/model_specs.md index e0f848b2..7dd09f8a 100644 --- a/docs/how_to_guides/model_specs.md +++ b/docs/how_to_guides/model_specs.md @@ -114,7 +114,7 @@ Fine-tune the estimation: Define custom transition equations using the `@register_params` decorator: ```python -from skillmodels.decorators import register_params +from skillmodels.common.decorators import register_params @register_params(params=["lincoeff"]) def my_linear(fac, params): diff --git a/environment-cuda.yml b/environment-cuda.yml new file mode 100644 index 00000000..9be30e14 --- /dev/null +++ b/environment-cuda.yml @@ -0,0 +1,67 @@ +--- +# Conda/mamba environment for the skillmodels `af-estimator` branch +# **with CUDA 12 GPU support**. +# +# Same package set as `environment.yml`, except JAX is pulled in via the +# `jax[cuda12]` PyPI extra and `cuda-nvcc` is added on the conda side. +# Requires the host system to provide a CUDA 12 toolkit; see +# https://jax.readthedocs.io/en/latest/installation.html for details. 
+# +# Usage: +# mamba env create -f environment-cuda.yml +# mamba activate skillmodels-af-cuda +name: skillmodels-af-cuda +channels: + - conda-forge + - nodefaults +dependencies: + # CUDA toolchain (required for the cuda12 JAX wheel below) + - cuda-nvcc >=12 + # Python + core scientific stack + - python ~=3.14.0 + - scipy >=1.16.0 + - h5py >=3.16.0,<4 + # Skillmodels conda deps + - filterpy * + - ipykernel >=6.29.5 + - jupyterlab * + - nbformat >=5.10.4 + - networkx * + - pybaum >=0.1.3 + - scikit-learn >=1.5 # AMN Stage 1 (mixture EM) + # Test / profiling tooling + - pytest >=8.4.1 + - pytest-cov >=6.2.1 + - pytest-xdist >=3.8.0 + - pytest-memray * + - snakeviz * + - xlrd >=2 + - prek * + # Downstream-only conda deps (skane-struct-bw / health-cognition): + - deepdiff >=8.5.0 + - memray >=1.17.2 + - statsmodels >=0.14.5 + - tabulate >=0.9.0 + - seaborn * + - pip + - pip: + # Skillmodels project deps (PyPI), with CUDA-12 JAX wheel + - dags>=0.5.1 + - jax[cuda12]>=0.9 + - jupyter-book>=2 + - kaleido>=1.2 + - numpy>=2.4 + - pandas>=3 + - plotly>=6.6 + - pytask>=0.5.8 + - pytask-parallel>=0.5.2 + - pdbp + # Pinned to the optimagic branch the AF estimator relies on. + # yamllint disable-line rule:line-length + - optimagic @ git+https://github.com/optimagic-dev/optimagic.git@probability-allow-fixed-entries + # Downstream-only PyPI deps: + - fides>=0.7.8 + - statadict>=1.1.0 + # Skillmodels itself, from the af-estimator branch. + # yamllint disable-line rule:line-length + - skillmodels @ git+https://github.com/OpenSourceEconomics/skillmodels.git@af-estimator diff --git a/environment.yml b/environment.yml new file mode 100644 index 00000000..684f5b34 --- /dev/null +++ b/environment.yml @@ -0,0 +1,70 @@ +--- +# Conda/mamba environment for the skillmodels `af-estimator` branch. +# +# Installs every package needed to run the skillmodels test suite **and** +# the two downstream research applications (`skane-struct-bw`, +# `health-cognition`) -- minus the two applications themselves, which +# are supplied separately by their respective project teams. +# +# CPU-only JAX. For an environment with CUDA-12 support use +# `environment-cuda.yml` instead. 
+# +# Usage: +# mamba env create -f environment.yml +# mamba activate skillmodels-af +name: skillmodels-af +channels: + - conda-forge + - nodefaults +dependencies: + # Python + core scientific stack + - python ~=3.14.0 + - scipy >=1.16.0 + - h5py >=3.16.0,<4 + # Skillmodels conda deps + - filterpy * + - ipykernel >=6.29.5 + - jupyterlab * + - nbformat >=5.10.4 + - networkx * + - pybaum >=0.1.3 + - scikit-learn >=1.5 # AMN Stage 1 (mixture EM) + # Test / profiling tooling (skillmodels' tests-cpu feature) + - pytest >=8.4.1 + - pytest-cov >=6.2.1 + - pytest-xdist >=3.8.0 + - pytest-memray * + - snakeviz * + - xlrd >=2 + - prek * + # Downstream-only conda deps (not used by skillmodels itself; required + # to run skane-struct-bw / health-cognition pipelines): + - deepdiff >=8.5.0 # health-cognition + skane: snapshot / diff utilities + - memray >=1.17.2 # health-cognition + skane: memory profiling + - statsmodels >=0.14.5 # health-cognition + skane: regression diagnostics + - tabulate >=0.9.0 # health-cognition + skane: table formatting in reports + - seaborn * # health-cognition: figure styling + - pip + - pip: + # Skillmodels project deps (PyPI) + - dags>=0.5.1 + - jax>=0.9 + - jupyter-book>=2 + - kaleido>=1.2 + - numpy>=2.4 + - pandas>=3 + - plotly>=6.6 + - pytask>=0.5.8 + - pytask-parallel>=0.5.2 + - pdbp + # Pinned to the optimagic branch the AF estimator relies on + # (`probability-allow-fixed-entries`). The PyPI release does not + # yet carry the required `FixedConstraintWithValue` semantics. + # yamllint disable-line rule:line-length + - optimagic @ git+https://github.com/optimagic-dev/optimagic.git@probability-allow-fixed-entries + # Downstream-only PyPI deps (not used by skillmodels itself): + - fides>=0.7.8 # health-cognition + skane: optimagic algorithm + - statadict>=1.1.0 # health-cognition + skane: Stata variable labels + # The library itself, from the af-estimator branch. 
+ # yamllint disable-line rule:line-length + - skillmodels @ git+https://github.com/OpenSourceEconomics/skillmodels.git@af-estimator diff --git a/pixi.lock b/pixi.lock index 60360ab9..406358d9 100644 --- a/pixi.lock +++ b/pixi.lock @@ -18,6 +18,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-auth-0.10.1-h2d2dd48_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-cal-0.9.13-h2c9d079_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-common-0.12.6-hb03c661_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-compression-0.3.2-h8b1a151_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-http-0.10.12-h4bacb7b_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-io-0.26.3-hc87160b_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-s3-0.11.5-h6d69fc9_5.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-sdkutils-0.2.4-h8b1a151_4.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-checksums-0.2.10-h8b1a151_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.18.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda @@ -29,6 +38,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-bin-1.2.0-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py314h3de4e8d_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hda65f42_9.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/c-ares-1.34.6-hb03c661_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.2.25-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 @@ -74,6 +84,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/gxx_linux-64-14.3.0-he467f4b_21.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h2-4.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/h5py-3.16.0-nompi_py314hddf7a69_102.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/hdf5-2.1.0-nompi_hd4fcb43_104.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hpack-4.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda @@ -88,6 +100,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/isoduration-20.11.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jedi-0.19.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jinja2-3.1.6-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/joblib-1.5.3-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/json5-0.13.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonpointer-3.0.0-pyhcf101f3_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-4.26.0-pyhcf101f3_0.conda @@ -110,13 +123,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.18-h0c24ade_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.45.1-default_hbd61a6d_101.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lerc-4.1.0-hdb68285_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libaec-1.1.5-h088129d_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libblas-3.11.0-5_h4a7cf45_openblas.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libbrotlicommon-1.2.0-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libbrotlidec-1.2.0-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libbrotlienc-1.2.0-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libcblas-3.11.0-5_h0358290_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libcurl-8.19.0-hcf29cc6_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libdeflate-1.25-h17f619e_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libedit-3.1.20250104-pl5321h7949ede_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libev-4.33-hd590300_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libexpat-2.7.4-hecca717_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libffi-3.5.2-h3435931_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libfreetype-2.14.2-ha770c72_0.conda @@ -131,6 +147,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/liblapack-3.11.0-5_h47877c9_openblas.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/liblzma-5.8.2-hb03c661_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libmpdec-4.0.0-hb03c661_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libnghttp2-1.67.0-had1ee68_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libnvptxcompiler-dev-12.9.86-ha770c72_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/libnvptxcompiler-dev_linux-64-12.9.86-ha770c72_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libopenblas-0.3.30-pthreads_h94d23a6_4.conda @@ -138,6 +155,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/libsanitizer-14.3.0-h8f1669f_18.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libsodium-1.0.21-h280c20c_3.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.52.0-hf4e2dac_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libssh2-1.11.1-hcf80075_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-15.2.0-h934c35e_18.conda - conda: https://conda.anaconda.org/conda-forge/noarch/libstdcxx-devel_linux-64-14.3.0-h9f08a49_118.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-15.2.0-hdf11a46_18.conda @@ -145,7 +163,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.41.3-h5347b49_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libwebp-base-1.6.0-hd42ef1d_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libxcb-1.17.0-h8a09558_0.conda - - conda: 
https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.1-hb9d3cd8_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.2-h25fd6f3_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/markdown-it-py-4.0.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/markupsafe-3.0.3-py314h67df5f8_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py314h1194b4b_0.conda @@ -202,6 +220,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rich-14.3.3-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/s2n-1.7.1-h1cbb8d7_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/scikit-learn-1.8.0-np2py314hf09ca88_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.17.1-py314hf07bd8e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.1.0-pyha191276_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-82.0.1-pyh332efcf_0.conda @@ -211,6 +231,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/sysroot_linux-64-2.28-h4ee821c_9.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/threadpoolctl-3.6.0-pyhecae5ae_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_h366c992_103.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.4.0-pyhcf101f3_0.conda @@ -247,7 +268,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/a7/2c/8ddb471091b46de99bba7eaa7f4e3983f9c8e74e310e585ff08915ce8b7a/jax_cuda12_pjrt-0.9.1-py3-none-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/17/e9/3632d7eddb8f282b50bf7c095bba9de91aae0de9baea56d1699d982fe5f8/jax_cuda12_plugin-0.9.1-cp314-cp314-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/76/fe/67d2c414b0860d42f4a20b1fadbe7aeffb1b3d885efebd7aedf22a4bc2a2/jaxlib-0.9.1-cp314-cp314-manylinux_2_27_x86_64.whl - - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7c/25/6fe2dfc3d830ec614c5f83f88fc7472c4ed892b7f7f496367d31de4110c4/jupyter_book-2.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/4b/97/f6de8d4af54d6401d6581a686cce3e3e2371a79ba459a449104e026c08bc/kaleido-1.2.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/20/6aa79ba3570bddd1bf7e951c6123f806751e58e8cce736bad77b2cf348d7/logistro-2.0.1-py3-none-any.whl @@ -269,7 +289,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/46/0c/c75bbfb967457a0b7670b8ad267bfc4fffdf341c074e0a80db06c24ccfd4/nvidia_nvjitlink_cu12-12.9.86-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl - pypi: https://files.pythonhosted.org/packages/44/6a/cf1265d48719852f5144055ff611d9e71678a9b29afb7ace72bf248a0cd8/nvidia_nvshmem_cu12-3.5.21-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl - pypi: 
https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/91/85/08c4e13a90a13c509d1fe09596dd8198338b6cfff9ee280f01ae7694889e/optimagic-0.5.3-py3-none-any.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=probability-allow-fixed-entries#ecd9ebe0dfb3abc09ab015d78774a30794d779d0 - pypi: https://files.pythonhosted.org/packages/fe/16/00261f20f467b9e8950a76ec1749f01359bf47f2fc3dac5e206de99835c0/optree-0.19.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/c2/8b/ecdad52d0b38d4b8f514be603e69ccd5eacf4e7241f972e37e79792212ec/orjson-3.11.7-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl - pypi: https://files.pythonhosted.org/packages/55/77/6ea82043db22cb0f2bbfe7198da3544000ddaadb12d26be36e19b03a2dc5/pandas-3.0.1-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl @@ -302,6 +322,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-auth-0.10.1-h2d2dd48_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-cal-0.9.13-h2c9d079_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-common-0.12.6-hb03c661_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-compression-0.3.2-h8b1a151_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-http-0.10.12-h4bacb7b_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-io-0.26.3-hc87160b_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-s3-0.11.5-h6d69fc9_5.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-sdkutils-0.2.4-h8b1a151_4.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-checksums-0.2.10-h8b1a151_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.18.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda @@ -311,6 +340,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-bin-1.2.0-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py314h3de4e8d_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hda65f42_9.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/c-ares-1.34.6-hb03c661_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.2.25-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 @@ -332,6 +362,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/freetype-2.14.2-ha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h2-4.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/h5py-3.16.0-nompi_py314hddf7a69_102.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/hdf5-2.1.0-nompi_hd4fcb43_104.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hpack-4.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda @@ -345,6 +377,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/isoduration-20.11.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jedi-0.19.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jinja2-3.1.6-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/joblib-1.5.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/json5-0.13.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonpointer-3.0.0-pyhcf101f3_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-4.26.0-pyhcf101f3_0.conda @@ -366,13 +399,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.18-h0c24ade_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.45.1-default_hbd61a6d_101.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lerc-4.1.0-hdb68285_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libaec-1.1.5-h088129d_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libblas-3.11.0-5_h4a7cf45_openblas.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libbrotlicommon-1.2.0-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libbrotlidec-1.2.0-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libbrotlienc-1.2.0-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libcblas-3.11.0-5_h0358290_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libcurl-8.19.0-hcf29cc6_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libdeflate-1.25-h17f619e_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libedit-3.1.20250104-pl5321h7949ede_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libev-4.33-hd590300_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libexpat-2.7.4-hecca717_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libffi-3.5.2-h3435931_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libfreetype-2.14.2-ha770c72_0.conda @@ -386,17 +422,19 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/liblapack-3.11.0-5_h47877c9_openblas.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/liblzma-5.8.2-hb03c661_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libmpdec-4.0.0-hb03c661_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libnghttp2-1.68.1-h877daf1_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libopenblas-0.3.30-pthreads_h94d23a6_4.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libpng-1.6.55-h421ea60_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libsodium-1.0.21-h280c20c_3.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.52.0-hf4e2dac_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libssh2-1.11.1-hcf80075_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-15.2.0-h934c35e_18.conda - conda: 
https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-15.2.0-hdf11a46_18.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libtiff-4.7.1-h9d88235_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.41.3-h5347b49_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libwebp-base-1.6.0-hd42ef1d_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libxcb-1.17.0-h8a09558_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.1-hb9d3cd8_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.2-h25fd6f3_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/markupsafe-3.0.3-py314h67df5f8_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py314h1194b4b_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.1-pyhd8ed1ab_0.conda @@ -448,6 +486,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/s2n-1.7.1-h1cbb8d7_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/scikit-learn-1.8.0-np2py314hf09ca88_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.17.1-py314hf07bd8e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.1.0-pyha191276_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-82.0.1-pyh332efcf_0.conda @@ -456,6 +496,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/threadpoolctl-3.6.0-pyhecae5ae_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_h366c992_103.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.4.0-pyhcf101f3_0.conda @@ -491,7 +532,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/80/e4/88778c6a23b65224e5088e68fd0924e5bde2196a26e76edb3ea3543fed6a/jax-0.9.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/76/fe/67d2c414b0860d42f4a20b1fadbe7aeffb1b3d885efebd7aedf22a4bc2a2/jaxlib-0.9.1-cp314-cp314-manylinux_2_27_x86_64.whl - - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7c/25/6fe2dfc3d830ec614c5f83f88fc7472c4ed892b7f7f496367d31de4110c4/jupyter_book-2.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/4b/97/f6de8d4af54d6401d6581a686cce3e3e2371a79ba459a449104e026c08bc/kaleido-1.2.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/20/6aa79ba3570bddd1bf7e951c6123f806751e58e8cce736bad77b2cf348d7/logistro-2.0.1-py3-none-any.whl @@ -502,7 +542,7 @@ environments: - pypi: 
https://files.pythonhosted.org/packages/fe/75/0b4a10da17a44cf13567d08a9c7632a285297e46253263f1ae119129d10a/narwhals-2.18.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/91/85/08c4e13a90a13c509d1fe09596dd8198338b6cfff9ee280f01ae7694889e/optimagic-0.5.3-py3-none-any.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=probability-allow-fixed-entries#ecd9ebe0dfb3abc09ab015d78774a30794d779d0 - pypi: https://files.pythonhosted.org/packages/fe/16/00261f20f467b9e8950a76ec1749f01359bf47f2fc3dac5e206de99835c0/optree-0.19.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/c2/8b/ecdad52d0b38d4b8f514be603e69ccd5eacf4e7241f972e37e79792212ec/orjson-3.11.7-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl - pypi: https://files.pythonhosted.org/packages/55/77/6ea82043db22cb0f2bbfe7198da3544000ddaadb12d26be36e19b03a2dc5/pandas-3.0.1-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl @@ -531,6 +571,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-auth-0.10.1-hcb83491_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-cal-0.9.13-h6ee9776_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-common-0.12.6-hc919400_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-compression-0.3.2-h3e7f9b5_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-http-0.10.12-h95cdebe_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-io-0.26.3-h4137820_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-s3-0.11.5-ha5d16b2_5.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-sdkutils-0.2.4-h16f91aa_4.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-checksums-0.2.10-h3e7f9b5_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.18.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda @@ -540,6 +589,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-bin-1.2.0-hc919400_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.2.0-py314h3daef5d_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-hd037594_9.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/c-ares-1.34.6-hc919400_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.2.25-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 @@ -561,6 +611,8 @@ environments: - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/freetype-2.14.2-hce30654_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h2-4.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/h5py-3.16.0-nompi_py314h658a3ac_102.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/hdf5-2.1.0-nompi_hc95e3eb_104.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hpack-4.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda @@ -574,6 +626,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/isoduration-20.11.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jedi-0.19.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jinja2-3.1.6-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/joblib-1.5.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/json5-0.13.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonpointer-3.0.0-pyhcf101f3_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-4.26.0-pyhcf101f3_0.conda @@ -593,14 +646,17 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lcms2-2.18-hdfa7624_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lerc-4.1.0-h1eee2c3_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libaec-1.1.5-h8664d51_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libblas-3.11.0-5_h51639a9_openblas.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libbrotlicommon-1.2.0-hc919400_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libbrotlidec-1.2.0-hc919400_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libbrotlienc-1.2.0-hc919400_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcblas-3.11.0-5_hb0561ab_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcurl-8.19.0-hd5a2499_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcxx-22.1.1-h55c6f16_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libdeflate-1.25-hc11a715_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libedit-3.1.20250104-pl5321hafb1f1b_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libev-4.33-h93a5062_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libexpat-2.7.4-hf6b4638_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libffi-3.5.2-hcf2aa1b_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libfreetype-2.14.2-hce30654_0.conda @@ -612,14 +668,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/liblapack-3.11.0-5_hd9741b5_openblas.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/liblzma-5.8.2-h8088a28_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libmpdec-4.0.0-h84a0fba_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libnghttp2-1.68.1-h8f3e76b_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libopenblas-0.3.30-openmp_ha158390_4.conda - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/libpng-1.6.55-h132b30e_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsodium-1.0.21-h1a92334_3.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.52.0-h1ae2325_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libssh2-1.11.1-h1590b86_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libtiff-4.7.1-h4030677_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libwebp-base-1.6.0-h07db88b_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libxcb-1.17.0-hdb1d25a_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.3.1-h8359307_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.3.2-h8088a28_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/llvm-openmp-22.1.0-hc7d1edf_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/markupsafe-3.0.3-py314h6e9b3f0_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/matplotlib-base-3.10.8-py314hd63e3f0_0.conda @@ -674,6 +732,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/rpds-py-0.30.0-py314haad56a0_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/scikit-learn-1.8.0-np2py314h15f0f0f_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/scipy-1.17.1-py314hfc1f868_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.1.0-pyh5552912_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-82.0.1-pyh332efcf_0.conda @@ -682,6 +741,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/threadpoolctl-3.6.0-pyhecae5ae_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h010d191_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.4.0-pyhcf101f3_0.conda @@ -716,7 +776,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/80/e4/88778c6a23b65224e5088e68fd0924e5bde2196a26e76edb3ea3543fed6a/jax-0.9.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a4/b0/f2c9caa6f545d4ecc1eab528c68c9191e40087f1bc79a6da2e29c6416510/jaxlib-0.9.1-cp314-cp314-macosx_11_0_arm64.whl - - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7c/25/6fe2dfc3d830ec614c5f83f88fc7472c4ed892b7f7f496367d31de4110c4/jupyter_book-2.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/4b/97/f6de8d4af54d6401d6581a686cce3e3e2371a79ba459a449104e026c08bc/kaleido-1.2.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/20/6aa79ba3570bddd1bf7e951c6123f806751e58e8cce736bad77b2cf348d7/logistro-2.0.1-py3-none-any.whl @@ 
-727,7 +786,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/fe/75/0b4a10da17a44cf13567d08a9c7632a285297e46253263f1ae119129d10a/narwhals-2.18.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/91/85/08c4e13a90a13c509d1fe09596dd8198338b6cfff9ee280f01ae7694889e/optimagic-0.5.3-py3-none-any.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=probability-allow-fixed-entries#ecd9ebe0dfb3abc09ab015d78774a30794d779d0 - pypi: https://files.pythonhosted.org/packages/08/67/2e19866a03a6e75eb62194a5b55e1e3154ca1517478c300232b0229f8c2a/optree-0.19.0-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/e9/1e/745565dca749813db9a093c5ebc4bac1a9475c64d54b95654336ac3ed961/orjson-3.11.7-cp314-cp314-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl - pypi: https://files.pythonhosted.org/packages/72/3a/5b39b51c64159f470f1ca3b1c2a87da290657ca022f7cd11442606f607d1/pandas-3.0.1-cp314-cp314-macosx_11_0_arm64.whl @@ -755,6 +814,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-auth-0.10.1-h5d51246_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-cal-0.9.13-h46f3b43_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-common-0.12.6-hfd05255_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-compression-0.3.2-hcb3a2da_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-http-0.10.12-h612f3e8_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-io-0.26.3-h0d5b9f9_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-s3-0.11.5-h87bd87b_5.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-sdkutils-0.2.4-hcb3a2da_4.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-checksums-0.2.10-hcb3a2da_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.18.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda @@ -786,6 +854,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/freetype-2.14.2-h57928b3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h2-4.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/h5py-3.16.0-nompi_py314h02517ec_102.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/hdf5-2.1.0-nompi_hd96b29f_104.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hpack-4.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda @@ -798,6 +868,7 @@ environments: - conda: 
https://conda.anaconda.org/conda-forge/noarch/isoduration-20.11.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jedi-0.19.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jinja2-3.1.6-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/joblib-1.5.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/json5-0.13.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonpointer-3.0.0-pyhcf101f3_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-4.26.0-pyhcf101f3_0.conda @@ -817,11 +888,13 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/lcms2-2.18-hf2c6c5f_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/lerc-4.1.0-hd936e49_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libaec-1.1.5-haf901d7_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libblas-3.11.0-5_hf2e6a31_mkl.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libbrotlicommon-1.2.0-hfd05255_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libbrotlidec-1.2.0-hfd05255_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libbrotlienc-1.2.0-hfd05255_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libcblas-3.11.0-5_h2a3cdd5_mkl.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libcurl-8.19.0-h8206538_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libdeflate-1.25-h51727cc_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libexpat-2.7.4-hac47afa_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libffi-3.5.2-h3d046cb_0.conda @@ -838,13 +911,14 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/libpng-1.6.55-h7351971_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libsodium-1.0.21-h6a83c73_3.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libsqlite-3.52.0-hf5d6505_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libssh2-1.11.1-h9aa295b_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libtiff-4.7.1-h8f73337_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libwebp-base-1.6.0-h4d5522a_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libwinpthread-12.0.0.r4.gg4f2fc60ca-h57928b3_10.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libxcb-1.17.0-h0e4246c_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libxml2-16-2.15.2-h692994f_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libxml2-2.15.2-h5d26750_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/libzlib-1.3.1-h2466b09_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libzlib-1.3.2-hfd05255_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/llvm-openmp-22.1.0-h4fa8253_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/markupsafe-3.0.3-py314h2359020_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.10.8-py314hfa45d96_0.conda @@ -896,6 +970,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/rpds-py-0.30.0-py314h9f07db2_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/win-64/scikit-learn-1.8.0-np2py314h1b5b07a_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/scipy-1.17.1-py314h221f224_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.1.0-pyh6dadd2b_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-82.0.1-pyh332efcf_0.conda @@ -905,6 +980,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-h3155e25_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyh6dadd2b_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/threadpoolctl-3.6.0-pyhecae5ae_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/tk-8.6.13-h6ed50ae_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.4.0-pyhcf101f3_0.conda @@ -946,7 +1022,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/80/e4/88778c6a23b65224e5088e68fd0924e5bde2196a26e76edb3ea3543fed6a/jax-0.9.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/0d/a8e27c1c434e489883c1182bd52de27775b8a78013de62e6eabf80991df5/jaxlib-0.9.1-cp314-cp314-win_amd64.whl - - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7c/25/6fe2dfc3d830ec614c5f83f88fc7472c4ed892b7f7f496367d31de4110c4/jupyter_book-2.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/4b/97/f6de8d4af54d6401d6581a686cce3e3e2371a79ba459a449104e026c08bc/kaleido-1.2.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/20/6aa79ba3570bddd1bf7e951c6123f806751e58e8cce736bad77b2cf348d7/logistro-2.0.1-py3-none-any.whl @@ -957,7 +1032,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/fe/75/0b4a10da17a44cf13567d08a9c7632a285297e46253263f1ae119129d10a/narwhals-2.18.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/91/85/08c4e13a90a13c509d1fe09596dd8198338b6cfff9ee280f01ae7694889e/optimagic-0.5.3-py3-none-any.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=probability-allow-fixed-entries#ecd9ebe0dfb3abc09ab015d78774a30794d779d0 - pypi: https://files.pythonhosted.org/packages/7e/c3/587cc9aa8d4742cd690da79460081e7d834499e07e8b2bd2ccc4c66928df/optree-0.19.0-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/12/df/172771902943af54bf661a8d102bdf2e7f932127968080632bda6054b62c/orjson-3.11.7-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/09/f8/8ce132104074f977f907442790eaae24e27bce3b3b454e82faa3237ff098/pandas-3.0.1-cp314-cp314-win_amd64.whl @@ -994,6 +1069,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.2.0-pyhcf101f3_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-auth-0.10.1-h2d2dd48_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-cal-0.9.13-h2c9d079_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-common-0.12.6-hb03c661_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-compression-0.3.2-h8b1a151_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-http-0.10.12-h4bacb7b_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-io-0.26.3-hc87160b_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-s3-0.11.5-h6d69fc9_5.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-sdkutils-0.2.4-h8b1a151_4.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-checksums-0.2.10-h8b1a151_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.18.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda @@ -1003,6 +1087,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-bin-1.2.0-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py314h3de4e8d_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hda65f42_9.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/c-ares-1.34.6-hb03c661_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.2.25-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 @@ -1025,6 +1110,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/freetype-2.14.2-ha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h2-4.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/h5py-3.16.0-nompi_py314hddf7a69_102.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/hdf5-2.1.0-nompi_hd4fcb43_104.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hpack-4.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda @@ -1039,6 +1126,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/isoduration-20.11.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jedi-0.19.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jinja2-3.1.6-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/joblib-1.5.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/json5-0.13.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonpointer-3.0.0-pyhcf101f3_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-4.26.0-pyhcf101f3_0.conda @@ -1060,13 +1148,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.18-h0c24ade_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.45.1-default_hbd61a6d_101.conda 
- conda: https://conda.anaconda.org/conda-forge/linux-64/lerc-4.1.0-hdb68285_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libaec-1.1.5-h088129d_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libblas-3.11.0-5_h4a7cf45_openblas.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libbrotlicommon-1.2.0-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libbrotlidec-1.2.0-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libbrotlienc-1.2.0-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libcblas-3.11.0-5_h0358290_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libcurl-8.19.0-hcf29cc6_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libdeflate-1.25-h17f619e_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libedit-3.1.20250104-pl5321h7949ede_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libev-4.33-hd590300_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libexpat-2.7.4-hecca717_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libffi-3.5.2-h3435931_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libfreetype-2.14.2-ha770c72_0.conda @@ -1080,17 +1171,19 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/liblapack-3.11.0-5_h47877c9_openblas.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/liblzma-5.8.2-hb03c661_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libmpdec-4.0.0-hb03c661_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libnghttp2-1.67.0-had1ee68_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libopenblas-0.3.30-pthreads_h94d23a6_4.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libpng-1.6.55-h421ea60_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libsodium-1.0.21-h280c20c_3.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.52.0-hf4e2dac_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libssh2-1.11.1-hcf80075_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-15.2.0-h934c35e_18.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-15.2.0-hdf11a46_18.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libtiff-4.7.1-h9d88235_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.41.3-h5347b49_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libwebp-base-1.6.0-hd42ef1d_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libxcb-1.17.0-h8a09558_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.1-hb9d3cd8_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.2-h25fd6f3_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/markdown-it-py-4.0.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/markupsafe-3.0.3-py314h67df5f8_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py314h1194b4b_0.conda @@ -1147,6 +1240,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rich-14.3.3-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/s2n-1.7.1-h1cbb8d7_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/scikit-learn-1.8.0-np2py314hf09ca88_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.17.1-py314hf07bd8e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.1.0-pyha191276_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-82.0.1-pyh332efcf_0.conda @@ -1155,6 +1250,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/threadpoolctl-3.6.0-pyhecae5ae_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_h366c992_103.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.4.0-pyhcf101f3_0.conda @@ -1189,7 +1285,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/d2/d8/09bfa816572a4d83bccd6750df1926f79158b1c36c5f73786e26dbe4ee38/greenlet-3.3.2-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/80/e4/88778c6a23b65224e5088e68fd0924e5bde2196a26e76edb3ea3543fed6a/jax-0.9.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/76/fe/67d2c414b0860d42f4a20b1fadbe7aeffb1b3d885efebd7aedf22a4bc2a2/jaxlib-0.9.1-cp314-cp314-manylinux_2_27_x86_64.whl - - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7c/25/6fe2dfc3d830ec614c5f83f88fc7472c4ed892b7f7f496367d31de4110c4/jupyter_book-2.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/4b/97/f6de8d4af54d6401d6581a686cce3e3e2371a79ba459a449104e026c08bc/kaleido-1.2.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/20/6aa79ba3570bddd1bf7e951c6123f806751e58e8cce736bad77b2cf348d7/logistro-2.0.1-py3-none-any.whl @@ -1198,7 +1293,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/fe/75/0b4a10da17a44cf13567d08a9c7632a285297e46253263f1ae119129d10a/narwhals-2.18.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/91/85/08c4e13a90a13c509d1fe09596dd8198338b6cfff9ee280f01ae7694889e/optimagic-0.5.3-py3-none-any.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=probability-allow-fixed-entries#ecd9ebe0dfb3abc09ab015d78774a30794d779d0 - pypi: https://files.pythonhosted.org/packages/fe/16/00261f20f467b9e8950a76ec1749f01359bf47f2fc3dac5e206de99835c0/optree-0.19.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/c2/8b/ecdad52d0b38d4b8f514be603e69ccd5eacf4e7241f972e37e79792212ec/orjson-3.11.7-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl - pypi: 
https://files.pythonhosted.org/packages/55/77/6ea82043db22cb0f2bbfe7198da3544000ddaadb12d26be36e19b03a2dc5/pandas-3.0.1-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl @@ -1224,6 +1319,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-auth-0.10.1-hcb83491_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-cal-0.9.13-h6ee9776_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-common-0.12.6-hc919400_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-compression-0.3.2-h3e7f9b5_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-http-0.10.12-h95cdebe_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-io-0.26.3-h4137820_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-s3-0.11.5-ha5d16b2_5.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-sdkutils-0.2.4-h16f91aa_4.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-checksums-0.2.10-h3e7f9b5_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.18.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda @@ -1233,6 +1337,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-bin-1.2.0-hc919400_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.2.0-py314h3daef5d_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-hd037594_9.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/c-ares-1.34.6-hc919400_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.2.25-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 @@ -1255,6 +1360,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/freetype-2.14.2-hce30654_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h2-4.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/h5py-3.16.0-nompi_py314h658a3ac_102.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/hdf5-2.1.0-nompi_hc95e3eb_104.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hpack-4.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda @@ -1269,6 +1376,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/isoduration-20.11.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jedi-0.19.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jinja2-3.1.6-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/joblib-1.5.3-pyhd8ed1ab_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/json5-0.13.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonpointer-3.0.0-pyhcf101f3_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-4.26.0-pyhcf101f3_0.conda @@ -1288,14 +1396,17 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lcms2-2.18-hdfa7624_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lerc-4.1.0-h1eee2c3_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libaec-1.1.5-h8664d51_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libblas-3.11.0-5_h51639a9_openblas.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libbrotlicommon-1.2.0-hc919400_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libbrotlidec-1.2.0-hc919400_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libbrotlienc-1.2.0-hc919400_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcblas-3.11.0-5_hb0561ab_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcurl-8.19.0-hd5a2499_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcxx-22.1.1-h55c6f16_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libdeflate-1.25-hc11a715_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libedit-3.1.20250104-pl5321hafb1f1b_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libev-4.33-h93a5062_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libexpat-2.7.4-hf6b4638_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libffi-3.5.2-hcf2aa1b_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libfreetype-2.14.2-hce30654_0.conda @@ -1307,14 +1418,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/liblapack-3.11.0-5_hd9741b5_openblas.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/liblzma-5.8.2-h8088a28_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libmpdec-4.0.0-h84a0fba_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libnghttp2-1.68.1-h8f3e76b_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libopenblas-0.3.30-openmp_ha158390_4.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libpng-1.6.55-h132b30e_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsodium-1.0.21-h1a92334_3.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.52.0-h1ae2325_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libssh2-1.11.1-h1590b86_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libtiff-4.7.1-h4030677_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libwebp-base-1.6.0-h07db88b_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libxcb-1.17.0-hdb1d25a_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.3.1-h8359307_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.3.2-h8088a28_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/llvm-openmp-22.1.0-hc7d1edf_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/markdown-it-py-4.0.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/markupsafe-3.0.3-py314h6e9b3f0_1.conda @@ -1374,6 +1487,7 @@ environments: - conda: 
https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rich-14.3.3-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/rpds-py-0.30.0-py314haad56a0_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/scikit-learn-1.8.0-np2py314h15f0f0f_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/scipy-1.17.1-py314hfc1f868_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.1.0-pyh5552912_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-82.0.1-pyh332efcf_0.conda @@ -1382,6 +1496,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/threadpoolctl-3.6.0-pyhecae5ae_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h010d191_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.4.0-pyhcf101f3_0.conda @@ -1415,7 +1530,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/e6/ab/fb21f4c939bb440104cc2b396d3be1d9b7a9fd3c6c2a53d98c45b3d7c954/fsspec-2026.2.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/80/e4/88778c6a23b65224e5088e68fd0924e5bde2196a26e76edb3ea3543fed6a/jax-0.9.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a4/b0/f2c9caa6f545d4ecc1eab528c68c9191e40087f1bc79a6da2e29c6416510/jaxlib-0.9.1-cp314-cp314-macosx_11_0_arm64.whl - - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7c/25/6fe2dfc3d830ec614c5f83f88fc7472c4ed892b7f7f496367d31de4110c4/jupyter_book-2.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/4b/97/f6de8d4af54d6401d6581a686cce3e3e2371a79ba459a449104e026c08bc/kaleido-1.2.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/20/6aa79ba3570bddd1bf7e951c6123f806751e58e8cce736bad77b2cf348d7/logistro-2.0.1-py3-none-any.whl @@ -1424,7 +1538,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/fe/75/0b4a10da17a44cf13567d08a9c7632a285297e46253263f1ae119129d10a/narwhals-2.18.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/91/85/08c4e13a90a13c509d1fe09596dd8198338b6cfff9ee280f01ae7694889e/optimagic-0.5.3-py3-none-any.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=probability-allow-fixed-entries#ecd9ebe0dfb3abc09ab015d78774a30794d779d0 - pypi: https://files.pythonhosted.org/packages/08/67/2e19866a03a6e75eb62194a5b55e1e3154ca1517478c300232b0229f8c2a/optree-0.19.0-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/e9/1e/745565dca749813db9a093c5ebc4bac1a9475c64d54b95654336ac3ed961/orjson-3.11.7-cp314-cp314-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl - pypi: 
https://files.pythonhosted.org/packages/72/3a/5b39b51c64159f470f1ca3b1c2a87da290657ca022f7cd11442606f607d1/pandas-3.0.1-cp314-cp314-macosx_11_0_arm64.whl @@ -1449,6 +1563,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-auth-0.10.1-h5d51246_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-cal-0.9.13-h46f3b43_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-common-0.12.6-hfd05255_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-compression-0.3.2-hcb3a2da_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-http-0.10.12-h612f3e8_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-io-0.26.3-h0d5b9f9_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-s3-0.11.5-h87bd87b_5.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-sdkutils-0.2.4-hcb3a2da_4.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-checksums-0.2.10-hcb3a2da_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.18.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda @@ -1480,6 +1603,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/freetype-2.14.2-h57928b3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h2-4.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/h5py-3.16.0-nompi_py314h02517ec_102.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/hdf5-2.1.0-nompi_hd96b29f_104.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hpack-4.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda @@ -1493,6 +1618,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/isoduration-20.11.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jedi-0.19.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jinja2-3.1.6-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/joblib-1.5.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/json5-0.13.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonpointer-3.0.0-pyhcf101f3_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-4.26.0-pyhcf101f3_0.conda @@ -1512,11 +1638,13 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/lcms2-2.18-hf2c6c5f_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/lerc-4.1.0-hd936e49_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libaec-1.1.5-haf901d7_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libblas-3.11.0-5_hf2e6a31_mkl.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libbrotlicommon-1.2.0-hfd05255_1.conda - 
conda: https://conda.anaconda.org/conda-forge/win-64/libbrotlidec-1.2.0-hfd05255_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libbrotlienc-1.2.0-hfd05255_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libcblas-3.11.0-5_h2a3cdd5_mkl.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libcurl-8.19.0-h8206538_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libdeflate-1.25-h51727cc_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libexpat-2.7.4-hac47afa_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libffi-3.5.2-h3d046cb_0.conda @@ -1533,13 +1661,14 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/libpng-1.6.55-h7351971_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libsodium-1.0.21-h6a83c73_3.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libsqlite-3.52.0-hf5d6505_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libssh2-1.11.1-h9aa295b_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libtiff-4.7.1-h8f73337_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libwebp-base-1.6.0-h4d5522a_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libwinpthread-12.0.0.r4.gg4f2fc60ca-h57928b3_10.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libxcb-1.17.0-h0e4246c_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libxml2-16-2.15.2-h692994f_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libxml2-2.15.2-h5d26750_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/libzlib-1.3.1-h2466b09_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libzlib-1.3.2-hfd05255_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/llvm-openmp-22.1.0-h4fa8253_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/markupsafe-3.0.3-py314h2359020_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.10.8-py314hfa45d96_0.conda @@ -1593,6 +1722,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/rpds-py-0.30.0-py314h9f07db2_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/scikit-learn-1.8.0-np2py314h1b5b07a_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/scipy-1.17.1-py314h221f224_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.1.0-pyh6dadd2b_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-82.0.1-pyh332efcf_0.conda @@ -1602,6 +1732,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-h3155e25_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyh6dadd2b_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/threadpoolctl-3.6.0-pyhecae5ae_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/tk-8.6.13-h6ed50ae_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.4.0-pyhcf101f3_0.conda @@ -1642,7 +1773,6 @@ environments: - pypi: 
https://files.pythonhosted.org/packages/f3/ca/2101ca3d9223a1dc125140dbc063644dca76df6ff356531eb27bc267b446/greenlet-3.3.2-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/80/e4/88778c6a23b65224e5088e68fd0924e5bde2196a26e76edb3ea3543fed6a/jax-0.9.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/0d/a8e27c1c434e489883c1182bd52de27775b8a78013de62e6eabf80991df5/jaxlib-0.9.1-cp314-cp314-win_amd64.whl - - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7c/25/6fe2dfc3d830ec614c5f83f88fc7472c4ed892b7f7f496367d31de4110c4/jupyter_book-2.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/4b/97/f6de8d4af54d6401d6581a686cce3e3e2371a79ba459a449104e026c08bc/kaleido-1.2.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/20/6aa79ba3570bddd1bf7e951c6123f806751e58e8cce736bad77b2cf348d7/logistro-2.0.1-py3-none-any.whl @@ -1653,7 +1783,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/fe/75/0b4a10da17a44cf13567d08a9c7632a285297e46253263f1ae119129d10a/narwhals-2.18.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/91/85/08c4e13a90a13c509d1fe09596dd8198338b6cfff9ee280f01ae7694889e/optimagic-0.5.3-py3-none-any.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=probability-allow-fixed-entries#ecd9ebe0dfb3abc09ab015d78774a30794d779d0 - pypi: https://files.pythonhosted.org/packages/7e/c3/587cc9aa8d4742cd690da79460081e7d834499e07e8b2bd2ccc4c66928df/optree-0.19.0-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/12/df/172771902943af54bf661a8d102bdf2e7f932127968080632bda6054b62c/orjson-3.11.7-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/09/f8/8ce132104074f977f907442790eaae24e27bce3b3b454e82faa3237ff098/pandas-3.0.1-cp314-cp314-win_amd64.whl @@ -1688,6 +1818,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-auth-0.10.1-h2d2dd48_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-cal-0.9.13-h2c9d079_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-common-0.12.6-hb03c661_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-compression-0.3.2-h8b1a151_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-http-0.10.12-h4bacb7b_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-io-0.26.3-hc87160b_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-s3-0.11.5-h6d69fc9_5.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-sdkutils-0.2.4-h8b1a151_4.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-checksums-0.2.10-h8b1a151_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.18.0-pyhcf101f3_1.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda @@ -1725,6 +1864,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/gnutls-3.8.11-h18acefa_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h2-4.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/h5py-3.16.0-nompi_py314hddf7a69_102.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/hdf5-2.1.0-nompi_hd4fcb43_104.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hpack-4.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda @@ -1739,6 +1880,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/isoduration-20.11.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jedi-0.19.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jinja2-3.1.6-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/joblib-1.5.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/json5-0.13.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonpointer-3.0.0-pyhcf101f3_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-4.26.0-pyhcf101f3_0.conda @@ -1760,6 +1902,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.18-h0c24ade_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.45.1-default_hbd61a6d_101.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lerc-4.1.0-hdb68285_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libaec-1.1.5-h088129d_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libarchive-3.8.6-gpl_hc2c16d8_100.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libblas-3.11.0-5_h4a7cf45_openblas.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libbrotlicommon-1.2.0-hb03c661_1.conda @@ -1803,7 +1946,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/libxcb-1.17.0-h8a09558_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libxml2-16-2.15.2-hca6bf5a_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libxml2-2.15.2-he237659_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.1-hb9d3cd8_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.2-h25fd6f3_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/linkify-it-py-2.1.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lz4-c-1.10.0-h5888daf_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lzo-2.10-h280c20c_1002.conda @@ -1870,6 +2013,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rich-14.3.3-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/s2n-1.7.1-h1cbb8d7_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/scikit-learn-1.8.0-np2py314hf09ca88_1.conda - conda: 
https://conda.anaconda.org/conda-forge/linux-64/scipy-1.17.1-py314hf07bd8e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.1.0-pyha191276_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-82.0.1-pyh332efcf_0.conda @@ -1880,6 +2025,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/textual-8.1.1-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/threadpoolctl-3.6.0-pyhecae5ae_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_h366c992_103.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.4.0-pyhcf101f3_0.conda @@ -1897,6 +2043,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/websocket-client-1.9.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/xlrd-2.0.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/xorg-libxau-1.0.12-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/xorg-libxdmcp-1.1.5-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/yaml-0.2.5-h280c20c_3.conda @@ -1915,7 +2062,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/d2/d8/09bfa816572a4d83bccd6750df1926f79158b1c36c5f73786e26dbe4ee38/greenlet-3.3.2-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/80/e4/88778c6a23b65224e5088e68fd0924e5bde2196a26e76edb3ea3543fed6a/jax-0.9.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/76/fe/67d2c414b0860d42f4a20b1fadbe7aeffb1b3d885efebd7aedf22a4bc2a2/jaxlib-0.9.1-cp314-cp314-manylinux_2_27_x86_64.whl - - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7c/25/6fe2dfc3d830ec614c5f83f88fc7472c4ed892b7f7f496367d31de4110c4/jupyter_book-2.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/4b/97/f6de8d4af54d6401d6581a686cce3e3e2371a79ba459a449104e026c08bc/kaleido-1.2.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/20/6aa79ba3570bddd1bf7e951c6123f806751e58e8cce736bad77b2cf348d7/logistro-2.0.1-py3-none-any.whl @@ -1924,7 +2070,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/fe/75/0b4a10da17a44cf13567d08a9c7632a285297e46253263f1ae119129d10a/narwhals-2.18.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/91/85/08c4e13a90a13c509d1fe09596dd8198338b6cfff9ee280f01ae7694889e/optimagic-0.5.3-py3-none-any.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=probability-allow-fixed-entries#ecd9ebe0dfb3abc09ab015d78774a30794d779d0 - pypi: 
https://files.pythonhosted.org/packages/fe/16/00261f20f467b9e8950a76ec1749f01359bf47f2fc3dac5e206de99835c0/optree-0.19.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/c2/8b/ecdad52d0b38d4b8f514be603e69ccd5eacf4e7241f972e37e79792212ec/orjson-3.11.7-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl - pypi: https://files.pythonhosted.org/packages/55/77/6ea82043db22cb0f2bbfe7198da3544000ddaadb12d26be36e19b03a2dc5/pandas-3.0.1-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl @@ -1950,6 +2096,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-auth-0.10.1-hcb83491_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-cal-0.9.13-h6ee9776_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-common-0.12.6-hc919400_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-compression-0.3.2-h3e7f9b5_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-http-0.10.12-h95cdebe_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-io-0.26.3-h4137820_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-s3-0.11.5-ha5d16b2_5.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-sdkutils-0.2.4-h16f91aa_4.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-checksums-0.2.10-h3e7f9b5_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.18.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda @@ -1959,6 +2114,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-bin-1.2.0-hc919400_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.2.0-py314h3daef5d_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-hd037594_9.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/c-ares-1.34.6-hc919400_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.2.25-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 @@ -1983,6 +2139,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/freetype-2.14.2-hce30654_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h2-4.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/h5py-3.16.0-nompi_py314h658a3ac_102.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/hdf5-2.1.0-nompi_hc95e3eb_104.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hpack-4.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda @@ -1997,6 +2155,7 @@ environments: - conda: 
https://conda.anaconda.org/conda-forge/noarch/isoduration-20.11.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jedi-0.19.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jinja2-3.1.6-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/joblib-1.5.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/json5-0.13.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonpointer-3.0.0-pyhcf101f3_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-4.26.0-pyhcf101f3_0.conda @@ -2016,14 +2175,17 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lcms2-2.18-hdfa7624_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lerc-4.1.0-h1eee2c3_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libaec-1.1.5-h8664d51_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libblas-3.11.0-5_h51639a9_openblas.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libbrotlicommon-1.2.0-hc919400_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libbrotlidec-1.2.0-hc919400_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libbrotlienc-1.2.0-hc919400_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcblas-3.11.0-5_hb0561ab_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcurl-8.19.0-hd5a2499_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcxx-22.1.1-h55c6f16_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libdeflate-1.25-hc11a715_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libedit-3.1.20250104-pl5321hafb1f1b_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libev-4.33-h93a5062_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libexpat-2.7.4-hf6b4638_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libffi-3.5.2-hcf2aa1b_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libfreetype-2.14.2-hce30654_0.conda @@ -2035,14 +2197,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/liblapack-3.11.0-5_hd9741b5_openblas.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/liblzma-5.8.2-h8088a28_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libmpdec-4.0.0-h84a0fba_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libnghttp2-1.68.1-h8f3e76b_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libopenblas-0.3.30-openmp_ha158390_4.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libpng-1.6.55-h132b30e_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsodium-1.0.21-h1a92334_3.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.52.0-h1ae2325_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libssh2-1.11.1-h1590b86_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libtiff-4.7.1-h4030677_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libwebp-base-1.6.0-h07db88b_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libxcb-1.17.0-hdb1d25a_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.3.1-h8359307_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.3.2-h8088a28_2.conda 
- conda: https://conda.anaconda.org/conda-forge/noarch/linkify-it-py-2.1.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/llvm-openmp-22.1.0-hc7d1edf_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lz4-c-1.10.0-h286801f_1.conda @@ -2109,6 +2273,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rich-14.3.3-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/rpds-py-0.30.0-py314haad56a0_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/scikit-learn-1.8.0-np2py314h15f0f0f_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/scipy-1.17.1-py314hfc1f868_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.1.0-pyh5552912_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-82.0.1-pyh332efcf_0.conda @@ -2119,6 +2284,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/textual-8.1.1-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/threadpoolctl-3.6.0-pyhecae5ae_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h010d191_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.4.0-pyhcf101f3_0.conda @@ -2136,6 +2302,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/websocket-client-1.9.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/xlrd-2.0.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/xorg-libxau-1.0.12-hc919400_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/xorg-libxdmcp-1.1.5-hc919400_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/yaml-0.2.5-h925e9cb_3.conda @@ -2153,7 +2320,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/e6/ab/fb21f4c939bb440104cc2b396d3be1d9b7a9fd3c6c2a53d98c45b3d7c954/fsspec-2026.2.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/80/e4/88778c6a23b65224e5088e68fd0924e5bde2196a26e76edb3ea3543fed6a/jax-0.9.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a4/b0/f2c9caa6f545d4ecc1eab528c68c9191e40087f1bc79a6da2e29c6416510/jaxlib-0.9.1-cp314-cp314-macosx_11_0_arm64.whl - - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7c/25/6fe2dfc3d830ec614c5f83f88fc7472c4ed892b7f7f496367d31de4110c4/jupyter_book-2.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/4b/97/f6de8d4af54d6401d6581a686cce3e3e2371a79ba459a449104e026c08bc/kaleido-1.2.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/20/6aa79ba3570bddd1bf7e951c6123f806751e58e8cce736bad77b2cf348d7/logistro-2.0.1-py3-none-any.whl @@ -2162,7 +2328,7 @@ environments: - pypi: 
https://files.pythonhosted.org/packages/fe/75/0b4a10da17a44cf13567d08a9c7632a285297e46253263f1ae119129d10a/narwhals-2.18.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/91/85/08c4e13a90a13c509d1fe09596dd8198338b6cfff9ee280f01ae7694889e/optimagic-0.5.3-py3-none-any.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=probability-allow-fixed-entries#ecd9ebe0dfb3abc09ab015d78774a30794d779d0 - pypi: https://files.pythonhosted.org/packages/08/67/2e19866a03a6e75eb62194a5b55e1e3154ca1517478c300232b0229f8c2a/optree-0.19.0-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/e9/1e/745565dca749813db9a093c5ebc4bac1a9475c64d54b95654336ac3ed961/orjson-3.11.7-cp314-cp314-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl - pypi: https://files.pythonhosted.org/packages/72/3a/5b39b51c64159f470f1ca3b1c2a87da290657ca022f7cd11442606f607d1/pandas-3.0.1-cp314-cp314-macosx_11_0_arm64.whl @@ -2187,6 +2353,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-auth-0.10.1-h5d51246_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-cal-0.9.13-h46f3b43_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-common-0.12.6-hfd05255_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-compression-0.3.2-hcb3a2da_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-http-0.10.12-h612f3e8_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-io-0.26.3-h0d5b9f9_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-s3-0.11.5-h87bd87b_5.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-sdkutils-0.2.4-hcb3a2da_4.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-checksums-0.2.10-hcb3a2da_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.18.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda @@ -2220,6 +2395,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/freetype-2.14.2-h57928b3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h2-4.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/h5py-3.16.0-nompi_py314h02517ec_102.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/hdf5-2.1.0-nompi_hd96b29f_104.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hpack-4.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda @@ -2233,6 +2410,7 @@ environments: - conda: 
https://conda.anaconda.org/conda-forge/noarch/isoduration-20.11.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jedi-0.19.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jinja2-3.1.6-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/joblib-1.5.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/json5-0.13.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonpointer-3.0.0-pyhcf101f3_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-4.26.0-pyhcf101f3_0.conda @@ -2252,11 +2430,13 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/lcms2-2.18-hf2c6c5f_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/lerc-4.1.0-hd936e49_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libaec-1.1.5-haf901d7_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libblas-3.11.0-5_hf2e6a31_mkl.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libbrotlicommon-1.2.0-hfd05255_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libbrotlidec-1.2.0-hfd05255_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libbrotlienc-1.2.0-hfd05255_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libcblas-3.11.0-5_h2a3cdd5_mkl.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libcurl-8.19.0-h8206538_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libdeflate-1.25-h51727cc_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libexpat-2.7.4-hac47afa_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libffi-3.5.2-h3d046cb_0.conda @@ -2273,13 +2453,14 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/libpng-1.6.55-h7351971_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libsodium-1.0.21-h6a83c73_3.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libsqlite-3.52.0-hf5d6505_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libssh2-1.11.1-h9aa295b_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libtiff-4.7.1-h8f73337_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libwebp-base-1.6.0-h4d5522a_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libwinpthread-12.0.0.r4.gg4f2fc60ca-h57928b3_10.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libxcb-1.17.0-h0e4246c_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libxml2-16-2.15.2-h692994f_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libxml2-2.15.2-h5d26750_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/libzlib-1.3.1-h2466b09_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libzlib-1.3.2-hfd05255_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/llvm-openmp-22.1.0-h4fa8253_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/markupsafe-3.0.3-py314h2359020_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.10.8-py314hfa45d96_0.conda @@ -2335,6 +2516,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/rpds-py-0.30.0-py314h9f07db2_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/win-64/scikit-learn-1.8.0-np2py314h1b5b07a_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/scipy-1.17.1-py314h221f224_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.1.0-pyh6dadd2b_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-82.0.1-pyh332efcf_0.conda @@ -2345,6 +2527,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-h3155e25_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyh6dadd2b_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/threadpoolctl-3.6.0-pyhecae5ae_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/tk-8.6.13-h6ed50ae_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.4.0-pyhcf101f3_0.conda @@ -2367,6 +2550,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/websocket-client-1.9.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/win_inet_pton-1.1.0-pyh7428d3b_8.conda - conda: https://conda.anaconda.org/conda-forge/win-64/winpty-0.4.3-4.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/xlrd-2.0.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/xorg-libxau-1.0.12-hba3369d_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/xorg-libxdmcp-1.1.5-hba3369d_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/yaml-0.2.5-h6a83c73_3.conda @@ -2385,7 +2569,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/f3/ca/2101ca3d9223a1dc125140dbc063644dca76df6ff356531eb27bc267b446/greenlet-3.3.2-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/80/e4/88778c6a23b65224e5088e68fd0924e5bde2196a26e76edb3ea3543fed6a/jax-0.9.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/0d/a8e27c1c434e489883c1182bd52de27775b8a78013de62e6eabf80991df5/jaxlib-0.9.1-cp314-cp314-win_amd64.whl - - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7c/25/6fe2dfc3d830ec614c5f83f88fc7472c4ed892b7f7f496367d31de4110c4/jupyter_book-2.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/4b/97/f6de8d4af54d6401d6581a686cce3e3e2371a79ba459a449104e026c08bc/kaleido-1.2.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/20/6aa79ba3570bddd1bf7e951c6123f806751e58e8cce736bad77b2cf348d7/logistro-2.0.1-py3-none-any.whl @@ -2396,7 +2579,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/fe/75/0b4a10da17a44cf13567d08a9c7632a285297e46253263f1ae119129d10a/narwhals-2.18.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/91/85/08c4e13a90a13c509d1fe09596dd8198338b6cfff9ee280f01ae7694889e/optimagic-0.5.3-py3-none-any.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=probability-allow-fixed-entries#ecd9ebe0dfb3abc09ab015d78774a30794d779d0 - pypi: 
https://files.pythonhosted.org/packages/7e/c3/587cc9aa8d4742cd690da79460081e7d834499e07e8b2bd2ccc4c66928df/optree-0.19.0-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/12/df/172771902943af54bf661a8d102bdf2e7f932127968080632bda6054b62c/orjson-3.11.7-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/09/f8/8ce132104074f977f907442790eaae24e27bce3b3b454e82faa3237ff098/pandas-3.0.1-cp314-cp314-win_amd64.whl @@ -2431,6 +2614,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-auth-0.10.1-h2d2dd48_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-cal-0.9.13-h2c9d079_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-common-0.12.6-hb03c661_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-compression-0.3.2-h8b1a151_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-http-0.10.12-h4bacb7b_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-io-0.26.3-hc87160b_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-s3-0.11.5-h6d69fc9_5.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-sdkutils-0.2.4-h8b1a151_4.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-checksums-0.2.10-h8b1a151_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.18.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda @@ -2493,6 +2685,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/gxx_linux-64-14.3.0-he467f4b_21.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h2-4.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/h5py-3.16.0-nompi_py314hddf7a69_102.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/hdf5-2.1.0-nompi_hd4fcb43_104.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hpack-4.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda @@ -2507,6 +2701,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/isoduration-20.11.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jedi-0.19.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jinja2-3.1.6-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/joblib-1.5.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/json5-0.13.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonpointer-3.0.0-pyhcf101f3_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-4.26.0-pyhcf101f3_0.conda @@ -2529,6 +2724,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.18-h0c24ade_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.45.1-default_hbd61a6d_101.conda - conda: 
https://conda.anaconda.org/conda-forge/linux-64/lerc-4.1.0-hdb68285_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libaec-1.1.5-h088129d_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libarchive-3.8.6-gpl_hc2c16d8_100.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libblas-3.11.0-5_h4a7cf45_openblas.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libbrotlicommon-1.2.0-hb03c661_1.conda @@ -2577,7 +2773,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/libxcb-1.17.0-h8a09558_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libxml2-16-2.15.2-hca6bf5a_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libxml2-2.15.2-he237659_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.1-hb9d3cd8_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.2-h25fd6f3_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/linkify-it-py-2.1.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lz4-c-1.10.0-h5888daf_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lzo-2.10-h280c20c_1002.conda @@ -2644,6 +2840,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rich-14.3.3-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/s2n-1.7.1-h1cbb8d7_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/scikit-learn-1.8.0-np2py314hf09ca88_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.17.1-py314hf07bd8e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.1.0-pyha191276_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-82.0.1-pyh332efcf_0.conda @@ -2655,6 +2853,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/sysroot_linux-64-2.28-h4ee821c_9.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/textual-8.1.1-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/threadpoolctl-3.6.0-pyhecae5ae_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_h366c992_103.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.4.0-pyhcf101f3_0.conda @@ -2672,6 +2871,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/websocket-client-1.9.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/xlrd-2.0.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/xorg-libxau-1.0.12-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/xorg-libxdmcp-1.1.5-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/yaml-0.2.5-h280c20c_3.conda @@ -2692,7 +2892,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/a7/2c/8ddb471091b46de99bba7eaa7f4e3983f9c8e74e310e585ff08915ce8b7a/jax_cuda12_pjrt-0.9.1-py3-none-manylinux_2_27_x86_64.whl - pypi: 
https://files.pythonhosted.org/packages/17/e9/3632d7eddb8f282b50bf7c095bba9de91aae0de9baea56d1699d982fe5f8/jax_cuda12_plugin-0.9.1-cp314-cp314-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/76/fe/67d2c414b0860d42f4a20b1fadbe7aeffb1b3d885efebd7aedf22a4bc2a2/jaxlib-0.9.1-cp314-cp314-manylinux_2_27_x86_64.whl - - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7c/25/6fe2dfc3d830ec614c5f83f88fc7472c4ed892b7f7f496367d31de4110c4/jupyter_book-2.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/4b/97/f6de8d4af54d6401d6581a686cce3e3e2371a79ba459a449104e026c08bc/kaleido-1.2.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/20/6aa79ba3570bddd1bf7e951c6123f806751e58e8cce736bad77b2cf348d7/logistro-2.0.1-py3-none-any.whl @@ -2714,7 +2913,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/46/0c/c75bbfb967457a0b7670b8ad267bfc4fffdf341c074e0a80db06c24ccfd4/nvidia_nvjitlink_cu12-12.9.86-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl - pypi: https://files.pythonhosted.org/packages/44/6a/cf1265d48719852f5144055ff611d9e71678a9b29afb7ace72bf248a0cd8/nvidia_nvshmem_cu12-3.5.21-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/91/85/08c4e13a90a13c509d1fe09596dd8198338b6cfff9ee280f01ae7694889e/optimagic-0.5.3-py3-none-any.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=probability-allow-fixed-entries#ecd9ebe0dfb3abc09ab015d78774a30794d779d0 - pypi: https://files.pythonhosted.org/packages/fe/16/00261f20f467b9e8950a76ec1749f01359bf47f2fc3dac5e206de99835c0/optree-0.19.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/c2/8b/ecdad52d0b38d4b8f514be603e69ccd5eacf4e7241f972e37e79792212ec/orjson-3.11.7-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl - pypi: https://files.pythonhosted.org/packages/55/77/6ea82043db22cb0f2bbfe7198da3544000ddaadb12d26be36e19b03a2dc5/pandas-3.0.1-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl @@ -2747,6 +2946,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-auth-0.10.1-h2d2dd48_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-cal-0.9.13-h2c9d079_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-common-0.12.6-hb03c661_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-compression-0.3.2-h8b1a151_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-http-0.10.12-h4bacb7b_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-io-0.26.3-hc87160b_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-s3-0.11.5-h6d69fc9_5.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-sdkutils-0.2.4-h8b1a151_4.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-checksums-0.2.10-h8b1a151_0.conda - conda: 
https://conda.anaconda.org/conda-forge/noarch/babel-2.18.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda @@ -2809,6 +3017,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/gxx_linux-64-14.3.0-he467f4b_21.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h2-4.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/h5py-3.16.0-nompi_py314hddf7a69_102.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/hdf5-2.1.0-nompi_hd4fcb43_104.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hpack-4.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda @@ -2823,6 +3033,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/isoduration-20.11.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jedi-0.19.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jinja2-3.1.6-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/joblib-1.5.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/json5-0.13.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonpointer-3.0.0-pyhcf101f3_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-4.26.0-pyhcf101f3_0.conda @@ -2845,6 +3056,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.18-h0c24ade_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.45.1-default_hbd61a6d_101.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lerc-4.1.0-hdb68285_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libaec-1.1.5-h088129d_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libarchive-3.8.6-gpl_hc2c16d8_100.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libblas-3.11.0-5_h4a7cf45_openblas.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libbrotlicommon-1.2.0-hb03c661_1.conda @@ -2893,7 +3105,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/libxcb-1.17.0-h8a09558_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libxml2-16-2.15.2-hca6bf5a_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libxml2-2.15.2-he237659_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.1-hb9d3cd8_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.2-h25fd6f3_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/linkify-it-py-2.1.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lz4-c-1.10.0-h5888daf_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lzo-2.10-h280c20c_1002.conda @@ -2960,6 +3172,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rich-14.3.3-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/s2n-1.7.1-h1cbb8d7_1.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/scikit-learn-1.8.0-np2py314hf09ca88_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.17.1-py314hf07bd8e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.1.0-pyha191276_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-82.0.1-pyh332efcf_0.conda @@ -2971,6 +3185,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/sysroot_linux-64-2.28-h4ee821c_9.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/textual-8.1.1-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/threadpoolctl-3.6.0-pyhecae5ae_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_h366c992_103.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.4.0-pyhcf101f3_0.conda @@ -2988,6 +3203,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/websocket-client-1.9.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/xlrd-2.0.2-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/xorg-libxau-1.0.12-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/xorg-libxdmcp-1.1.5-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/yaml-0.2.5-h280c20c_3.conda @@ -3008,7 +3224,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/a7/2c/8ddb471091b46de99bba7eaa7f4e3983f9c8e74e310e585ff08915ce8b7a/jax_cuda12_pjrt-0.9.1-py3-none-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/17/e9/3632d7eddb8f282b50bf7c095bba9de91aae0de9baea56d1699d982fe5f8/jax_cuda12_plugin-0.9.1-cp314-cp314-manylinux_2_27_x86_64.whl - pypi: https://files.pythonhosted.org/packages/76/fe/67d2c414b0860d42f4a20b1fadbe7aeffb1b3d885efebd7aedf22a4bc2a2/jaxlib-0.9.1-cp314-cp314-manylinux_2_27_x86_64.whl - - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7c/25/6fe2dfc3d830ec614c5f83f88fc7472c4ed892b7f7f496367d31de4110c4/jupyter_book-2.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/4b/97/f6de8d4af54d6401d6581a686cce3e3e2371a79ba459a449104e026c08bc/kaleido-1.2.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/20/6aa79ba3570bddd1bf7e951c6123f806751e58e8cce736bad77b2cf348d7/logistro-2.0.1-py3-none-any.whl @@ -3030,7 +3245,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/46/0c/c75bbfb967457a0b7670b8ad267bfc4fffdf341c074e0a80db06c24ccfd4/nvidia_nvjitlink_cu12-12.9.86-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl - pypi: https://files.pythonhosted.org/packages/44/6a/cf1265d48719852f5144055ff611d9e71678a9b29afb7ace72bf248a0cd8/nvidia_nvshmem_cu12-3.5.21-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: 
https://files.pythonhosted.org/packages/91/85/08c4e13a90a13c509d1fe09596dd8198338b6cfff9ee280f01ae7694889e/optimagic-0.5.3-py3-none-any.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=probability-allow-fixed-entries#ecd9ebe0dfb3abc09ab015d78774a30794d779d0 - pypi: https://files.pythonhosted.org/packages/fe/16/00261f20f467b9e8950a76ec1749f01359bf47f2fc3dac5e206de99835c0/optree-0.19.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/c2/8b/ecdad52d0b38d4b8f514be603e69ccd5eacf4e7241f972e37e79792212ec/orjson-3.11.7-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl - pypi: https://files.pythonhosted.org/packages/55/77/6ea82043db22cb0f2bbfe7198da3544000ddaadb12d26be36e19b03a2dc5/pandas-3.0.1-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl @@ -3045,6 +3260,341 @@ environments: - pypi: https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/dd/1a/5d9a402b39ec892d856bbdd9db502ff73ce28cdf4aff72eb1ce1d6843506/universal_pathlib-0.3.10-py3-none-any.whl - pypi: ./ + tests-cuda13: + channels: + - url: https://conda.anaconda.org/conda-forge/ + indexes: + - https://pypi.org/simple + options: + pypi-prerelease-mode: if-necessary-or-explicit + packages: + linux-64: + - conda: https://conda.anaconda.org/conda-forge/linux-64/_openmp_mutex-4.5-20_gnu.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/_python_abi3_support-1.0-hd8ed1ab_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.13.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-25.1.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-25.1.0-py314h5bd0f2a_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/arrow-1.4.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-26.1.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-auth-0.10.1-ha62d5e7_3.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-cal-0.9.13-h2c9d079_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-common-0.12.6-hb03c661_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-compression-0.3.2-h8b1a151_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-http-0.10.13-h4bacb7b_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-io-0.26.3-h692f434_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-s3-0.12.2-he6ee468_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-sdkutils-0.2.4-h8b1a151_4.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-checksums-0.2.10-h8b1a151_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.18.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.4.0-py314h680f03e_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/binutils_impl_linux-64-2.45.1-default_hfdba357_102.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/binutils_linux-64-2.45.1-default_h4852527_102.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/bleach-with-css-6.3.0-hbca2aae_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-1.2.0-hed03a55_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-bin-1.2.0-hb03c661_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py314h3de4e8d_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hda65f42_9.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/c-ares-1.34.6-hb03c661_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.4.22-hbd8a1cb_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.4.22-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py314h4a8dc5f_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.7-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/comm-0.2.3-pyhe01879c_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.3.3-py314h97ea11e_4.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/coverage-7.13.5-py314h67df5f8_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.4-py314hd8ed1ab_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-cccl_linux-64-13.2.75-ha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-crt-dev_linux-64-13.2.78-ha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-crt-tools-13.2.78-ha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-cudart-13.2.75-hecca717_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-cudart-dev-13.2.75-hecca717_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-cudart-dev_linux-64-13.2.75-h376f20c_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-cudart-static-13.2.75-hecca717_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-cudart-static_linux-64-13.2.75-h376f20c_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-cudart_linux-64-13.2.75-h376f20c_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-driver-dev_linux-64-13.2.75-h376f20c_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-nvcc-13.2.78-hcdd1206_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-nvcc-dev_linux-64-13.2.78-he91c749_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-nvcc-impl-13.2.78-h85509e4_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-nvcc-tools-13.2.78-he02047a_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-nvcc_linux-64-13.2.78-hb2fc203_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-nvvm-dev_linux-64-13.2.78-ha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-nvvm-impl-13.2.78-h4bc722e_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/cuda-nvvm-tools-13.2.78-h4bc722e_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-version-13.2-he2cc418_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.8.20-py314h42812f9_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/decorator-5.2.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/linux-64/elfutils-0.194-h849f50c_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/exceptiongroup-1.3.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/execnet-2.1.2-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/executing-2.2.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/filterpy-1.4.5-pyhd8ed1ab_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.62.1-pyh7db6752_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/freetype-2.14.3-ha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/gcc_impl_linux-64-15.2.0-he0086c7_19.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/gcc_linux-64-15.2.0-h7be306e_24.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/gmp-6.3.0-hac33072_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/gnutls-3.8.13-h18acefa_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/gxx_impl_linux-64-15.2.0-hda75c37_19.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/gxx_linux-64-15.2.0-he30e93d_24.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/h2-4.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/h5py-3.16.0-nompi_py314hddf7a69_102.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/hdf5-2.1.0-nompi_h87a9417_105.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/hpack-4.1.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/hyperframe-6.1.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/icu-78.3-h33c6efd_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.13-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.8.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/iniconfig-2.3.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ipykernel-7.2.0-pyha191276_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ipython-9.13.0-pyh53cf698_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ipython_pygments_lexers-1.1.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/isoduration-20.11.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jedi-0.19.2-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jinja2-3.1.6-pyhcf101f3_1.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/joblib-1.5.3-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/json5-0.14.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jsonpointer-3.1.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-4.26.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-specifications-2025.9.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-with-format-nongpl-4.26.0-hcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter-lsp-2.3.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_client-8.8.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_core-5.9.1-pyhc90fa1f_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_events-0.12.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_server-2.18.2-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_server_terminals-0.5.4-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab-4.5.7-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_server-2.28.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/kernel-headers_linux-64-4.18.0-he073ed8_9.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/keyutils-1.6.3-hb9d3cd8_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.5.0-py314h97ea11e_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/krb5-1.22.2-ha1258a1_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.19.1-h0c24ade_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.45.1-default_hbd61a6d_102.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/lerc-4.1.0-hdb68285_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libaec-1.1.5-h088129d_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libarchive-3.8.7-gpl_hc2c16d8_100.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libblas-3.11.0-6_h4a7cf45_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libbrotlicommon-1.2.0-hb03c661_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libbrotlidec-1.2.0-hb03c661_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libbrotlienc-1.2.0-hb03c661_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libcblas-3.11.0-6_h0358290_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libcurl-8.20.0-hcf29cc6_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libdeflate-1.25-h17f619e_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libedit-3.1.20250104-pl5321h7949ede_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libev-4.33-hd590300_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libexpat-2.8.0-hecca717_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libffi-3.5.2-h3435931_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libfreetype-2.14.3-ha770c72_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/libfreetype6-2.14.3-h73754d4_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-15.2.0-he0feb66_19.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/libgcc-devel_linux-64-15.2.0-hcc6f6b0_119.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-15.2.0-h69a702a_19.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgfortran-15.2.0-h69a702a_19.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgfortran5-15.2.0-h68bc16d_19.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgomp-15.2.0-he0feb66_19.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libiconv-1.18-h3b78370_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libidn2-2.3.8-hfac485b_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libjpeg-turbo-3.1.4.1-hb03c661_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/liblapack-3.11.0-6_h47877c9_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/liblzma-5.8.3-hb03c661_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libmicrohttpd-1.0.2-hc2fc477_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libmpdec-4.0.0-hb03c661_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libnghttp2-1.68.1-h877daf1_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libnvptxcompiler-dev-13.2.78-ha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/libnvptxcompiler-dev_linux-64-13.2.78-ha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libopenblas-0.3.32-pthreads_h94d23a6_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libpng-1.6.58-h421ea60_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libsanitizer-15.2.0-h90f66d4_19.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libsodium-1.0.21-h280c20c_3.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.53.1-h0c1763c_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libssh2-1.11.1-hcf80075_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-15.2.0-h934c35e_19.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/libstdcxx-devel_linux-64-15.2.0-hd446a21_119.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-15.2.0-hdf11a46_19.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libtasn1-4.21.0-hb03c661_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libtiff-4.7.1-h9d88235_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libunistring-0.9.10-h7f98852_0.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/linux-64/libunwind-1.8.3-h65a8314_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.42-h5347b49_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libwebp-base-1.6.0-hd42ef1d_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libxcb-1.17.0-h8a09558_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libxml2-16-2.15.3-hca6bf5a_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libxml2-2.15.3-h49c6c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.2-h25fd6f3_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/linkify-it-py-2.1.0-pyhcf101f3_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/lz4-c-1.10.0-h5888daf_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/lzo-2.10-h280c20c_1002.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/markdown-it-py-4.2.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/markupsafe-3.0.3-py314h67df5f8_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.9-py314h1194b4b_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.2-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/mdit-py-plugins-0.6.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/mdurl-0.1.2-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/memray-1.19.3-py314hef15ded_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/nbclient-0.10.4-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/nbconvert-core-7.17.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/nbformat-5.10.4-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/ncurses-6.6-hdb14827_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.6.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/nettle-3.10.1-h4a9d5aa_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/networkx-3.6.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.4-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.4.3-py314h2b28147_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/openjpeg-2.5.4-h55fea9a_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.6.2-h35e630c_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/p11-kit-0.26.2-h3435931_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-26.2-pyhc364b38_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.7-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pexpect-4.9.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.2.0-py314h8ec4b1a_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.9.6-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pluggy-1.6.0-pyhf9edf01_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/prek-0.3.13-hb17b654_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.25.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-7.2.2-py314h0f05182_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/pthread-stubs-0.4-hb9d3cd8_1002.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/ptyprocess-0.7.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.3-pyhd8ed1ab_1.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/pybaum-0.1.3-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pycparser-2.22-pyh29332c3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pygments-2.20.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pyparsing-3.3.2-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyha55dd90_7.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-9.0.3-pyhc364b38_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-cov-7.1.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-memray-1.8.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-xdist-3.8.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.14.4-habeac84_100_cp314.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhe01879c_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.21.2-pyhe01879c_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.4-h4df99d1_100.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-3.2.1-pyh332efcf_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2026.2-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/pyyaml-6.0.3-py314h67df5f8_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/pyzmq-27.1.0-py312hda471dd_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/qhull-2020.2-h434a139_5.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.3-h853b02a_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/referencing-0.37.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/requests-2.33.1-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3339-validator-0.1.4-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/rich-15.0.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/s2n-1.7.2-hc5a330e_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/scikit-learn-1.8.0-np2py314hf09ca88_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.17.1-py314hf07bd8e_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.1.0-pyha191276_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-82.0.1-pyh332efcf_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/snakeviz-2.2.2-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.1-pyhd8ed1ab_2.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.3-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/sysroot_linux-64-2.28-h4ee821c_9.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/textual-8.2.5-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/threadpoolctl-3.6.0-pyhecae5ae_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.4.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_h366c992_103.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.4.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.5-py314h5bd0f2a_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.15.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/typing-extensions-4.15.0-h396c80c_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.15.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/typing_utils-0.1.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2025c-hc9c84f9_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/uc-micro-py-2.0.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-17.0.1-py314h5bd0f2a_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/uri-template-1.3.0-pyhd8ed1ab_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.7.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.7.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-pyhd8ed1ab_3.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/websocket-client-1.9.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/xlrd-2.0.2-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/xorg-libxau-1.0.12-hb03c661_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/xorg-libxdmcp-1.1.5-hb03c661_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/yaml-0.2.5-h280c20c_3.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/zeromq-4.3.5-h41580af_10.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/zipp-3.23.1-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/zlib-ng-2.3.3-hceb46e0_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/zstd-1.5.7-hb78ec9c_6.conda + - pypi: https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/ba/6c/ff8bf52315064dbeb55cb5067e191120a5b2e58bb648d0d34cf7969dc2c2/choreographer-1.3.0-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/ae/44/c1221527f6a71a01ec6fbad7fa78f1d50dfa02217385cf0fa3eec7087d59/click-8.3.3-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/2c/1a/aff8bb287a4b1400f69e09a53bd65de96aa5cee5691925b38731c67fc695/click_default_group-1.2.4-py2.py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl + - pypi: 
https://files.pythonhosted.org/packages/2c/c1/a662f0a8f6e024fca239d493f278d9adf5de1c8408af46a53a76beb13534/dags-0.5.1-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/72/9f/485516087cd8c44183aaf9ab850247a28e2e4a42a4d62eab77c21f673450/flatten_dict-0.5.0-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/d5/0c/043d5e551459da400957a1395e0febbf771446ff34291afcbe3d8be2a279/fsspec-2026.4.0-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/a3/59/1bd6d7428d6ed9106efbb8c52310c60fd04f6672490f452aeaa3829aa436/greenlet-3.5.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/70/aa/dfac6d72cc35bc07e7587115b6946e333ef4ccb2e6cd26ecf639438c5d26/jax-0.10.0-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/21/98/77f15d81fd0637da454e453c8456d4a2b5c8b2e66823b4237ee8689152cf/jax_cuda13_pjrt-0.10.0-py3-none-manylinux_2_27_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/8f/2b/5c63c29d155afdf1d7827f8c04efe8cac47fc6783d8c53959e43de879dcc/jax_cuda13_plugin-0.10.0-cp314-cp314-manylinux_2_27_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/a1/8e/b2a08ffc51c93842de71f7f988865cebfa7f43d6721957812dc8cc8b9d40/jaxlib-0.10.0-cp314-cp314-manylinux_2_27_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/b5/83/205e7af4153d9690c3cb94fa9cea670c0d26ce7f022aaa589a9e136f1491/jupyter_book-2.1.5-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/9e/b9/a6d8bb7d228940f01885bd9f327ab7f9d366a9be775c4bf366bf9d9477ae/kaleido-1.3.0-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/54/20/6aa79ba3570bddd1bf7e951c6123f806751e58e8cce736bad77b2cf348d7/logistro-2.0.1-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/ce/80/7f1f1bf8c2d5dfd8e9c0e1191aa355ff8b80b5619f84d6dcc2703fa7fd5a/loky-3.5.6-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/c6/bb/82c7dcf38070b46172a517e2334e665c5bf374a262f99a283ea454bece7c/ml_dtypes-0.5.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/25/1f/cca084ca2572810fff12ea9dbdcbe39eac048f40daf4a9077b49fcbe8cee/msgspec-0.21.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/c7/e1/68c2256b69a314eba133673377ba9118c356f6342a0c02b61de449cf2bf2/narwhals-2.21.0-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/f8/79/0cefdaa1d9e45018a227bac64a79b92d2733cde28a8fd09c65362de08622/nvidia_cublas-13.4.1.1-py3-none-manylinux_2_27_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/92/87/d23db8276b76b4a7e4a702eebdc0a70e3b56c17b4dcd980ecb0f68b022e1/nvidia_cuda_cccl-13.2.75-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/ea/78/501eee5cce9202fba2f3476529e296a7f6d003261d80b52ab0abfa09ddd6/nvidia_cuda_crt-13.2.78-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/b7/2d/cbf8f6288259c502165282fdaa2b733daae98434e3f2aee2b7952ba87c6f/nvidia_cuda_cupti-13.2.75-py3-none-manylinux_2_25_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/65/0f/c7c7d538c61794130e759ad74710ab5aa8cab1f700ee1754381f8c665605/nvidia_cuda_nvcc-13.2.78-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + - 
pypi: https://files.pythonhosted.org/packages/5f/96/237b40b171e06eb65905375c4ad5c96f78c2f861ac6e8ae7f650d95e1dfd/nvidia_cuda_nvrtc-13.2.78-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/dc/74/f1493b0774c6eaf0234512bb650e1ab90ce8f61fecf0b4aaf1fb416f571e/nvidia_cuda_runtime-13.2.75-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/57/96/ce2cb84b5e8bb94dd55f554e3454b91e9ecd6708aa27d4a7b12f287613bc/nvidia_cudnn_cu13-9.22.0.52-py3-none-manylinux_2_27_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/36/3e/8d717a6e1f6e27b85b64650b1104dbcf6108c9dc7e27e9e26a0d8e936cc5/nvidia_cufft-12.2.0.46-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/6b/97/a3c41eac54c89f6aac788d2b3ccd6642b32aa6b79650af3dedb8ee7c2bfa/nvidia_cusolver-12.2.0.1-py3-none-manylinux_2_27_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/b7/bd/bad43b37bcf13167637bef26399693d517b95092d742e8749eda5f4a85f3/nvidia_cusparse-12.7.10.1-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/3e/93/6d020a69fc37e57fae8a96ab0c53102d96538db256e933e914d100e5a430/nvidia_nccl_cu13-2.30.4-py3-none-manylinux_2_18_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/1e/b5/dae67f0c45516cfaff2d7fba873c7425c2866d4c9ede5c14a269d89ed79b/nvidia_nvjitlink-13.2.78-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/5d/7b/2ab033584a3339552472ac8d79543c503a0e06dd0d082448b06697e7f716/nvidia_nvshmem_cu13-3.6.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/e8/1f/930d63ccc8adcdf27bfc051a24e3e4da2cf6ef987848d6d1d642e29d704b/nvidia_nvvm-13.2.78-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=probability-allow-fixed-entries#ecd9ebe0dfb3abc09ab015d78774a30794d779d0 + - pypi: https://files.pythonhosted.org/packages/9c/1a/4834b1f2fb1847412353d7342eb7a1d001a4f3bd9d24155e057135a4aa44/optree-0.19.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/76/3e/c0b690253f0b82d86e99949af13533363acfb5432ecb5d53dd5b3bce9c34/orjson-3.11.9-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/15/88/3cdd54fa279341afa10acf8d2b503556b1375245dccc9315659f795dd2e9/pandas-3.0.2-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/b1/29/c028a0731e202035f0e2e0bfbf1a3e46ad6c628cbb17f6f1cc9eea5d9ff1/pathlib_abc-0.5.2-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/51/fe/53ac0cd932db5dcaf55961bc7cb7afdca8d80d8cc7406ed661f0c7dc111a/pdbp-1.8.2-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/90/ad/cba91b3bcf04073e4d1655a5c1710ef3f457f56f7d1b79dcc3d72f4dd912/plotly-6.7.0-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/d7/54/c30cb1d08258612ece1dfa72c6918998bebecb916c54fca6d806bc780f2b/pytask-0.6.0-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/5b/f2/44a7dd795a52d34d033b1cb1a6b1162eada650079e557e236fb6b88943be/pytask_parallel-0.5.4-py3-none-any.whl + - pypi: 
https://files.pythonhosted.org/packages/78/91/3635cdb13318cb0a328abaa69e2b91251caad39d6779aa308098f341f6cb/simplejson-4.1.1-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/2e/84/efc7c0bf3a1c5eef81d397f6fddac855becdbb11cb38ff957888603014a7/sqlalchemy-2.0.49-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + - pypi: https://files.pythonhosted.org/packages/cb/fc/8c82be70b8f96d09943360f34cfb2ecdd3035294c51bce4131eeabe56645/tabcompleter-1.4.1-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/c7/18/c86eb8e0202e32dd3df50d43d7ff9854f8e0603945ff398974c1d91ac1ef/tomli_w-1.2.0-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/dd/1a/5d9a402b39ec892d856bbdd9db502ff73ce28cdf4aff72eb1ce1d6843506/universal_pathlib-0.3.10-py3-none-any.whl + - pypi: ./ type-checking: channels: - url: https://conda.anaconda.org/conda-forge/ @@ -3063,6 +3613,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-auth-0.10.1-h2d2dd48_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-cal-0.9.13-h2c9d079_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-common-0.12.6-hb03c661_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-compression-0.3.2-h8b1a151_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-http-0.10.12-h4bacb7b_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-io-0.26.3-hc87160b_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-s3-0.11.5-h6d69fc9_5.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-sdkutils-0.2.4-h8b1a151_4.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/aws-checksums-0.2.10-h8b1a151_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.18.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda @@ -3072,6 +3631,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-bin-1.2.0-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.2.0-py314h3de4e8d_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hda65f42_9.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/c-ares-1.34.6-hb03c661_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.2.25-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 @@ -3094,6 +3654,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/freetype-2.14.2-ha770c72_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h2-4.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/h5py-3.16.0-nompi_py314hddf7a69_102.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/hdf5-2.1.0-nompi_hd4fcb43_104.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hpack-4.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda @@ -3108,6 +3670,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/isoduration-20.11.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jedi-0.19.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jinja2-3.1.6-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/joblib-1.5.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/json5-0.13.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonpointer-3.0.0-pyhcf101f3_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-4.26.0-pyhcf101f3_0.conda @@ -3129,13 +3692,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.18-h0c24ade_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.45.1-default_hbd61a6d_101.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/lerc-4.1.0-hdb68285_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libaec-1.1.5-h088129d_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libblas-3.11.0-5_h4a7cf45_openblas.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libbrotlicommon-1.2.0-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libbrotlidec-1.2.0-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libbrotlienc-1.2.0-hb03c661_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libcblas-3.11.0-5_h0358290_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libcurl-8.19.0-hcf29cc6_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libdeflate-1.25-h17f619e_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libedit-3.1.20250104-pl5321h7949ede_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libev-4.33-hd590300_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libexpat-2.7.4-hecca717_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libffi-3.5.2-h3435931_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libfreetype-2.14.2-ha770c72_0.conda @@ -3149,17 +3715,19 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/liblapack-3.11.0-5_h47877c9_openblas.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/liblzma-5.8.2-hb03c661_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libmpdec-4.0.0-hb03c661_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libnghttp2-1.67.0-had1ee68_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libopenblas-0.3.30-pthreads_h94d23a6_4.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libpng-1.6.55-h421ea60_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libsodium-1.0.21-h280c20c_3.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.52.0-hf4e2dac_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libssh2-1.11.1-hcf80075_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-15.2.0-h934c35e_18.conda - conda: 
https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-15.2.0-hdf11a46_18.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libtiff-4.7.1-h9d88235_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.41.3-h5347b49_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libwebp-base-1.6.0-hd42ef1d_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libxcb-1.17.0-h8a09558_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.1-hb9d3cd8_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.2-h25fd6f3_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/markdown-it-py-4.0.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/markupsafe-3.0.3-py314h67df5f8_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.8-py314h1194b4b_0.conda @@ -3216,6 +3784,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rich-14.3.3-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/s2n-1.7.1-h1cbb8d7_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/scikit-learn-1.8.0-np2py314hf09ca88_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.17.1-py314hf07bd8e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.1.0-pyha191276_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-82.0.1-pyh332efcf_0.conda @@ -3224,6 +3794,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/threadpoolctl-3.6.0-pyhecae5ae_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_h366c992_103.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.4.0-pyhcf101f3_0.conda @@ -3258,7 +3829,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/d2/d8/09bfa816572a4d83bccd6750df1926f79158b1c36c5f73786e26dbe4ee38/greenlet-3.3.2-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/80/e4/88778c6a23b65224e5088e68fd0924e5bde2196a26e76edb3ea3543fed6a/jax-0.9.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/76/fe/67d2c414b0860d42f4a20b1fadbe7aeffb1b3d885efebd7aedf22a4bc2a2/jaxlib-0.9.1-cp314-cp314-manylinux_2_27_x86_64.whl - - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7c/25/6fe2dfc3d830ec614c5f83f88fc7472c4ed892b7f7f496367d31de4110c4/jupyter_book-2.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/4b/97/f6de8d4af54d6401d6581a686cce3e3e2371a79ba459a449104e026c08bc/kaleido-1.2.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/20/6aa79ba3570bddd1bf7e951c6123f806751e58e8cce736bad77b2cf348d7/logistro-2.0.1-py3-none-any.whl @@ -3267,7 +3837,7 @@ environments: - pypi: 
https://files.pythonhosted.org/packages/fe/75/0b4a10da17a44cf13567d08a9c7632a285297e46253263f1ae119129d10a/narwhals-2.18.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/91/85/08c4e13a90a13c509d1fe09596dd8198338b6cfff9ee280f01ae7694889e/optimagic-0.5.3-py3-none-any.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=probability-allow-fixed-entries#ecd9ebe0dfb3abc09ab015d78774a30794d779d0 - pypi: https://files.pythonhosted.org/packages/fe/16/00261f20f467b9e8950a76ec1749f01359bf47f2fc3dac5e206de99835c0/optree-0.19.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/c2/8b/ecdad52d0b38d4b8f514be603e69ccd5eacf4e7241f972e37e79792212ec/orjson-3.11.7-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl - pypi: https://files.pythonhosted.org/packages/55/77/6ea82043db22cb0f2bbfe7198da3544000ddaadb12d26be36e19b03a2dc5/pandas-3.0.1-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl @@ -3296,6 +3866,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-auth-0.10.1-hcb83491_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-cal-0.9.13-h6ee9776_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-common-0.12.6-hc919400_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-compression-0.3.2-h3e7f9b5_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-http-0.10.12-h95cdebe_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-io-0.26.3-h4137820_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-s3-0.11.5-ha5d16b2_5.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-sdkutils-0.2.4-h16f91aa_4.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-checksums-0.2.10-h3e7f9b5_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.18.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda @@ -3305,6 +3884,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-bin-1.2.0-hc919400_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.2.0-py314h3daef5d_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-hd037594_9.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/c-ares-1.34.6-hc919400_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.2.25-hbd8a1cb_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 @@ -3327,6 +3907,8 @@ environments: - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/freetype-2.14.2-hce30654_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h2-4.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/h5py-3.16.0-nompi_py314h658a3ac_102.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/hdf5-2.1.0-nompi_hc95e3eb_104.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hpack-4.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda @@ -3341,6 +3923,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/isoduration-20.11.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jedi-0.19.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jinja2-3.1.6-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/joblib-1.5.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/json5-0.13.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonpointer-3.0.0-pyhcf101f3_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-4.26.0-pyhcf101f3_0.conda @@ -3360,14 +3943,17 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lcms2-2.18-hdfa7624_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lerc-4.1.0-h1eee2c3_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libaec-1.1.5-h8664d51_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libblas-3.11.0-5_h51639a9_openblas.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libbrotlicommon-1.2.0-hc919400_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libbrotlidec-1.2.0-hc919400_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libbrotlienc-1.2.0-hc919400_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcblas-3.11.0-5_hb0561ab_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcurl-8.19.0-hd5a2499_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcxx-22.1.1-h55c6f16_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libdeflate-1.25-hc11a715_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libedit-3.1.20250104-pl5321hafb1f1b_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libev-4.33-h93a5062_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libexpat-2.7.4-hf6b4638_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libffi-3.5.2-hcf2aa1b_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libfreetype-2.14.2-hce30654_0.conda @@ -3379,14 +3965,16 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/liblapack-3.11.0-5_hd9741b5_openblas.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/liblzma-5.8.2-h8088a28_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libmpdec-4.0.0-h84a0fba_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libnghttp2-1.68.1-h8f3e76b_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libopenblas-0.3.30-openmp_ha158390_4.conda - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/libpng-1.6.55-h132b30e_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsodium-1.0.21-h1a92334_3.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.52.0-h1ae2325_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libssh2-1.11.1-h1590b86_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libtiff-4.7.1-h4030677_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libwebp-base-1.6.0-h07db88b_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libxcb-1.17.0-hdb1d25a_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.3.1-h8359307_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.3.2-h8088a28_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/llvm-openmp-22.1.0-hc7d1edf_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/markdown-it-py-4.0.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/markupsafe-3.0.3-py314h6e9b3f0_1.conda @@ -3446,6 +4034,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/rich-14.3.3-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/rpds-py-0.30.0-py314haad56a0_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/scikit-learn-1.8.0-np2py314h15f0f0f_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/scipy-1.17.1-py314hfc1f868_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.1.0-pyh5552912_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-82.0.1-pyh332efcf_0.conda @@ -3454,6 +4043,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.8.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyhc90fa1f_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/threadpoolctl-3.6.0-pyhecae5ae_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h010d191_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.4.0-pyhcf101f3_0.conda @@ -3487,7 +4077,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/e6/ab/fb21f4c939bb440104cc2b396d3be1d9b7a9fd3c6c2a53d98c45b3d7c954/fsspec-2026.2.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/80/e4/88778c6a23b65224e5088e68fd0924e5bde2196a26e76edb3ea3543fed6a/jax-0.9.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a4/b0/f2c9caa6f545d4ecc1eab528c68c9191e40087f1bc79a6da2e29c6416510/jaxlib-0.9.1-cp314-cp314-macosx_11_0_arm64.whl - - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7c/25/6fe2dfc3d830ec614c5f83f88fc7472c4ed892b7f7f496367d31de4110c4/jupyter_book-2.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/4b/97/f6de8d4af54d6401d6581a686cce3e3e2371a79ba459a449104e026c08bc/kaleido-1.2.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/20/6aa79ba3570bddd1bf7e951c6123f806751e58e8cce736bad77b2cf348d7/logistro-2.0.1-py3-none-any.whl @@ -3496,7 +4085,7 @@ 
environments: - pypi: https://files.pythonhosted.org/packages/fe/75/0b4a10da17a44cf13567d08a9c7632a285297e46253263f1ae119129d10a/narwhals-2.18.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/91/85/08c4e13a90a13c509d1fe09596dd8198338b6cfff9ee280f01ae7694889e/optimagic-0.5.3-py3-none-any.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=probability-allow-fixed-entries#ecd9ebe0dfb3abc09ab015d78774a30794d779d0 - pypi: https://files.pythonhosted.org/packages/08/67/2e19866a03a6e75eb62194a5b55e1e3154ca1517478c300232b0229f8c2a/optree-0.19.0-cp314-cp314-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/e9/1e/745565dca749813db9a093c5ebc4bac1a9475c64d54b95654336ac3ed961/orjson-3.11.7-cp314-cp314-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl - pypi: https://files.pythonhosted.org/packages/72/3a/5b39b51c64159f470f1ca3b1c2a87da290657ca022f7cd11442606f607d1/pandas-3.0.1-cp314-cp314-macosx_11_0_arm64.whl @@ -3524,6 +4113,15 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/asttokens-3.0.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.2.0-pyhcf101f3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-auth-0.10.1-h5d51246_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-cal-0.9.13-h46f3b43_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-common-0.12.6-hfd05255_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-compression-0.3.2-hcb3a2da_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-http-0.10.12-h612f3e8_1.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-io-0.26.3-h0d5b9f9_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-s3-0.11.5-h87bd87b_5.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-sdkutils-0.2.4-hcb3a2da_4.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/aws-checksums-0.2.10-hcb3a2da_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.18.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda @@ -3555,6 +4153,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/freetype-2.14.2-h57928b3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/h2-4.3.0-pyhcf101f3_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/h5py-3.16.0-nompi_py314h02517ec_102.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/hdf5-2.1.0-nompi_hd96b29f_104.conda - conda: https://conda.anaconda.org/conda-forge/noarch/hpack-4.1.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpcore-1.0.9-pyh29332c3_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/httpx-0.28.1-pyhd8ed1ab_0.conda @@ -3568,6 +4168,7 @@ environments: - conda: 
https://conda.anaconda.org/conda-forge/noarch/isoduration-20.11.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jedi-0.19.2-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jinja2-3.1.6-pyhcf101f3_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/joblib-1.5.3-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/json5-0.13.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonpointer-3.0.0-pyhcf101f3_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-4.26.0-pyhcf101f3_0.conda @@ -3587,11 +4188,13 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/lark-1.3.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/lcms2-2.18-hf2c6c5f_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/lerc-4.1.0-hd936e49_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libaec-1.1.5-haf901d7_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libblas-3.11.0-5_hf2e6a31_mkl.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libbrotlicommon-1.2.0-hfd05255_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libbrotlidec-1.2.0-hfd05255_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libbrotlienc-1.2.0-hfd05255_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libcblas-3.11.0-5_h2a3cdd5_mkl.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libcurl-8.19.0-h8206538_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libdeflate-1.25-h51727cc_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libexpat-2.7.4-hac47afa_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libffi-3.5.2-h3d046cb_0.conda @@ -3608,13 +4211,14 @@ environments: - conda: https://conda.anaconda.org/conda-forge/win-64/libpng-1.6.55-h7351971_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libsodium-1.0.21-h6a83c73_3.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libsqlite-3.52.0-hf5d6505_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libssh2-1.11.1-h9aa295b_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libtiff-4.7.1-h8f73337_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libwebp-base-1.6.0-h4d5522a_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libwinpthread-12.0.0.r4.gg4f2fc60ca-h57928b3_10.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libxcb-1.17.0-h0e4246c_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libxml2-16-2.15.2-h692994f_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libxml2-2.15.2-h5d26750_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/libzlib-1.3.1-h2466b09_2.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libzlib-1.3.2-hfd05255_2.conda - conda: https://conda.anaconda.org/conda-forge/win-64/llvm-openmp-22.1.0-h4fa8253_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/markupsafe-3.0.3-py314h2359020_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/matplotlib-base-3.10.8-py314hfa45d96_0.conda @@ -3668,6 +4272,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3986-validator-0.1.1-pyh9f0ad1d_0.tar.bz2 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3987-syntax-1.1.0-pyhe01879c_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/rpds-py-0.30.0-py314h9f07db2_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/win-64/scikit-learn-1.8.0-np2py314h1b5b07a_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/scipy-1.17.1-py314h221f224_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/send2trash-2.1.0-pyh6dadd2b_1.conda - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-82.0.1-pyh332efcf_0.conda @@ -3677,6 +4282,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-h3155e25_2.conda - conda: https://conda.anaconda.org/conda-forge/noarch/terminado-0.18.1-pyh6dadd2b_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/threadpoolctl-3.6.0-pyhecae5ae_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.4.0-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/tk-8.6.13-h6ed50ae_3.conda - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.4.0-pyhcf101f3_0.conda @@ -3717,7 +4323,6 @@ environments: - pypi: https://files.pythonhosted.org/packages/f3/ca/2101ca3d9223a1dc125140dbc063644dca76df6ff356531eb27bc267b446/greenlet-3.3.2-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/80/e4/88778c6a23b65224e5088e68fd0924e5bde2196a26e76edb3ea3543fed6a/jax-0.9.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/0d/a8e27c1c434e489883c1182bd52de27775b8a78013de62e6eabf80991df5/jaxlib-0.9.1-cp314-cp314-win_amd64.whl - - pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/7c/25/6fe2dfc3d830ec614c5f83f88fc7472c4ed892b7f7f496367d31de4110c4/jupyter_book-2.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/4b/97/f6de8d4af54d6401d6581a686cce3e3e2371a79ba459a449104e026c08bc/kaleido-1.2.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/20/6aa79ba3570bddd1bf7e951c6123f806751e58e8cce736bad77b2cf348d7/logistro-2.0.1-py3-none-any.whl @@ -3728,7 +4333,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/fe/75/0b4a10da17a44cf13567d08a9c7632a285297e46253263f1ae119129d10a/narwhals-2.18.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/23/cd/066e86230ae37ed0be70aae89aabf03ca8d9f39c8aea0dec8029455b5540/opt_einsum-3.4.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/91/85/08c4e13a90a13c509d1fe09596dd8198338b6cfff9ee280f01ae7694889e/optimagic-0.5.3-py3-none-any.whl + - pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=probability-allow-fixed-entries#ecd9ebe0dfb3abc09ab015d78774a30794d779d0 - pypi: https://files.pythonhosted.org/packages/7e/c3/587cc9aa8d4742cd690da79460081e7d834499e07e8b2bd2ccc4c66928df/optree-0.19.0-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/12/df/172771902943af54bf661a8d102bdf2e7f932127968080632bda6054b62c/orjson-3.11.7-cp314-cp314-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/09/f8/8ce132104074f977f907442790eaae24e27bce3b3b454e82faa3237ff098/pandas-3.0.1-cp314-cp314-win_amd64.whl @@ -3825,6 +4430,25 @@ packages: - pkg:pypi/anyio?source=compressed-mapping size: 145175 timestamp: 1767719033569 +- conda: https://conda.anaconda.org/conda-forge/noarch/anyio-4.13.0-pyhcf101f3_0.conda + sha256: 
f09aed24661cd45ba54a43772504f05c0698248734f9ae8cd289d314ac89707e + md5: af2df4b9108808da3dc76710fe50eae2 + depends: + - exceptiongroup >=1.0.2 + - idna >=2.8 + - python >=3.10 + - typing_extensions >=4.5 + - python + constrains: + - trio >=0.32.0 + - uvloop >=0.22.1 + - winloop >=0.2.3 + license: MIT + license_family: MIT + purls: + - pkg:pypi/anyio?source=hash-mapping + size: 146764 + timestamp: 1774359453364 - conda: https://conda.anaconda.org/conda-forge/noarch/appnope-0.1.4-pyhd8ed1ab_1.conda sha256: 8f032b140ea4159806e4969a68b4a3c0a7cab1ad936eb958a2b5ffe5335e19bf md5: 54898d0f524c9dee622d44bbb081a8ab @@ -3937,6 +4561,19 @@ packages: - pkg:pypi/async-lru?source=hash-mapping size: 21470 timestamp: 1771623881915 +- conda: https://conda.anaconda.org/conda-forge/noarch/async-lru-2.3.0-pyhcf101f3_0.conda + sha256: ea8486637cfb89dc26dc9559921640cd1d5fd37e5e02c33d85c94572139f2efe + md5: b85e84cb64c762569cc1a760c2327e0a + depends: + - python >=3.10 + - typing_extensions >=4.0.0 + - python + license: MIT + license_family: MIT + purls: + - pkg:pypi/async-lru?source=hash-mapping + size: 22949 + timestamp: 1773926359134 - conda: https://conda.anaconda.org/conda-forge/noarch/attrs-25.4.0-pyhcf101f3_1.conda sha256: c13d5e42d187b1d0255f591b7ce91201d4ed8a5370f0d986707a802c20c9d32f md5: 537296d57ea995666c68c821b00e360b @@ -3949,65 +4586,535 @@ packages: - pkg:pypi/attrs?source=compressed-mapping size: 64759 timestamp: 1764875182184 -- conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.18.0-pyhcf101f3_1.conda - sha256: a14a9ad02101aab25570543a59c5193043b73dc311a25650134ed9e6cb691770 - md5: f1976ce927373500cc19d3c0b2c85177 +- conda: https://conda.anaconda.org/conda-forge/noarch/attrs-26.1.0-pyhcf101f3_0.conda + sha256: 1b6124230bb4e571b1b9401537ecff575b7b109cc3a21ee019f65e083b8399ab + md5: c6b0543676ecb1fb2d7643941fe375f2 depends: - python >=3.10 - python - constrains: - - pytz >=2015.7 - license: BSD-3-Clause - license_family: BSD - purls: - - pkg:pypi/babel?source=compressed-mapping - size: 7684321 - timestamp: 1772555330347 -- conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda - noarch: generic - sha256: c31ab719d256bc6f89926131e88ecd0f0c5d003fe8481852c6424f4ec6c7eb29 - md5: a2ac7763a9ac75055b68f325d3255265 - depends: - - python >=3.14 - license: BSD-3-Clause AND MIT AND EPL-2.0 - purls: [] - size: 7514 - timestamp: 1767044983590 -- conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda - sha256: bf1e71c3c0a5b024e44ff928225a0874fc3c3356ec1a0b6fe719108e6d1288f6 - md5: 5267bef8efea4127aacd1f4e1f149b6e - depends: - - python >=3.10 - - soupsieve >=1.2 - - typing-extensions license: MIT license_family: MIT purls: - - pkg:pypi/beautifulsoup4?source=hash-mapping - size: 90399 - timestamp: 1764520638652 -- conda: https://conda.anaconda.org/conda-forge/linux-64/binutils_impl_linux-64-2.45.1-default_hfdba357_101.conda - sha256: 74341b26a2b9475dc14ba3cf12432fcd10a23af285101883e720216d81d44676 - md5: 83aa53cb3f5fc849851a84d777a60551 + - pkg:pypi/attrs?source=hash-mapping + size: 64927 + timestamp: 1773935801332 +- conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-auth-0.10.1-h2d2dd48_2.conda + sha256: 292aa18fe6ab5351710e6416fbd683eaef3aa5b1b7396da9350ff08efc660e4f + md5: 675ea6d90900350b1dcfa8231a5ea2dd depends: - - ld_impl_linux-64 2.45.1 default_hbd61a6d_101 - - sysroot_linux-64 - - zstd >=1.5.7,<1.6.0a0 - license: GPL-3.0-only - license_family: GPL + - libgcc >=14 + - __glibc >=2.17,<3.0.a0 + - 
aws-c-common >=0.12.6,<0.12.7.0a0 + - aws-c-sdkutils >=0.2.4,<0.2.5.0a0 + - aws-c-cal >=0.9.13,<0.9.14.0a0 + - aws-c-http >=0.10.12,<0.10.13.0a0 + - aws-c-io >=0.26.3,<0.26.4.0a0 + license: Apache-2.0 + license_family: APACHE purls: [] - size: 3744895 - timestamp: 1770267152681 -- conda: https://conda.anaconda.org/conda-forge/linux-64/binutils_linux-64-2.45.1-default_h4852527_101.conda - sha256: 4826f97d33cbe54459970a1e84500dbe0cccf8326aaf370e707372ae20ec5a47 - md5: dec96579f9a7035a59492bf6ee613b53 + size: 134426 + timestamp: 1774274932726 +- conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-auth-0.10.1-ha62d5e7_3.conda + sha256: ccbf2cc4bea4aab6e071d67ecc2743197759f6df855787e7a5f57f7973f913a2 + md5: 55eaf7066da1299d217ab32baedc7fa8 depends: - - binutils_impl_linux-64 2.45.1 default_hfdba357_101 + - libgcc >=14 + - __glibc >=2.17,<3.0.a0 + - aws-c-io >=0.26.3,<0.26.4.0a0 + - aws-c-sdkutils >=0.2.4,<0.2.5.0a0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + - aws-c-http >=0.10.13,<0.10.14.0a0 + - aws-c-cal >=0.9.13,<0.9.14.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 134427 + timestamp: 1777489423676 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-auth-0.10.1-hcb83491_2.conda + sha256: aba942578ad57e7b584434ed4e39c5ff7ed4ad3f326ac3eda26913ca343ea255 + md5: 1c701edc28f543a0e040325b223d5ca0 + depends: + - __osx >=11.0 + - aws-c-io >=0.26.3,<0.26.4.0a0 + - aws-c-sdkutils >=0.2.4,<0.2.5.0a0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + - aws-c-cal >=0.9.13,<0.9.14.0a0 + - aws-c-http >=0.10.12,<0.10.13.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 116820 + timestamp: 1774275057443 +- conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-auth-0.10.1-h5d51246_2.conda + sha256: f937d40f01493c4799a673f56d70434d6cddb2ec967cf642a39e0e04282a9a1e + md5: 908d5d8755564e2c3f3770fca7ff0736 + depends: + - vc >=14.3,<15 + - vc14_runtime >=14.44.35208 + - ucrt >=10.0.20348.0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + - aws-c-io >=0.26.3,<0.26.4.0a0 + - aws-c-http >=0.10.12,<0.10.13.0a0 + - aws-c-cal >=0.9.13,<0.9.14.0a0 + - aws-c-sdkutils >=0.2.4,<0.2.5.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 127421 + timestamp: 1774275018076 +- conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-cal-0.9.13-h2c9d079_1.conda + sha256: f21d648349a318f4ae457ea5403d542ba6c0e0343b8642038523dd612b2a5064 + md5: 3c3d02681058c3d206b562b2e3bc337f + depends: + - __glibc >=2.17,<3.0.a0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + - libgcc >=14 + - openssl >=3.5.4,<4.0a0 + license: Apache-2.0 + license_family: Apache + purls: [] + size: 56230 + timestamp: 1764593147526 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-cal-0.9.13-h6ee9776_1.conda + sha256: 13c42cb54619df0a1c3e5e5b0f7c8e575460b689084024fd23abeb443aac391b + md5: 8baab664c541d6f059e83423d9fc5e30 + depends: + - __osx >=11.0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + license: Apache-2.0 + license_family: Apache + purls: [] + size: 45233 + timestamp: 1764593742187 +- conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-cal-0.9.13-h46f3b43_1.conda + sha256: 5f61082caea9fbdd6ba02702935e9dea9997459a7e6c06fd47f21b81aac882fb + md5: 7cc4953d504d4e8f3d6f4facb8549465 + depends: + - aws-c-common >=0.12.6,<0.12.7.0a0 + - ucrt >=10.0.20348.0 + - vc >=14.3,<15 + - vc14_runtime >=14.44.35208 + license: Apache-2.0 + license_family: Apache + purls: [] + size: 53613 + timestamp: 1764593604081 +- conda: 
https://conda.anaconda.org/conda-forge/linux-64/aws-c-common-0.12.6-hb03c661_0.conda + sha256: 926a5b9de0a586e88669d81de717c8dd3218c51ce55658e8a16af7e7fe87c833 + md5: e36ad70a7e0b48f091ed6902f04c23b8 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + license: Apache-2.0 + license_family: Apache + purls: [] + size: 239605 + timestamp: 1763585595898 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-common-0.12.6-hc919400_0.conda + sha256: cd3817c82470826167b1d8008485676862640cff65750c34062e6c20aeac419b + md5: b759f02a7fa946ea9fd9fb035422c848 + depends: + - __osx >=11.0 + license: Apache-2.0 + license_family: Apache + purls: [] + size: 224116 + timestamp: 1763585987935 +- conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-common-0.12.6-hfd05255_0.conda + sha256: 0627691c34eb3d9fcd18c71346d9f16f83e8e58f9983e792138a2cccf387d18a + md5: b1465f33b05b9af02ad0887c01837831 + depends: + - ucrt >=10.0.20348.0 + - vc >=14.3,<15 + - vc14_runtime >=14.44.35208 + license: Apache-2.0 + license_family: Apache + purls: [] + size: 236441 + timestamp: 1763586152571 +- conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-compression-0.3.2-h8b1a151_0.conda + sha256: 1838bdc077b77168416801f4715335b65e9223f83641a2c28644f8acd8f9db0e + md5: f16f498641c9e05b645fe65902df661a + depends: + - libgcc >=14 + - __glibc >=2.17,<3.0.a0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 22278 + timestamp: 1767790836624 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-compression-0.3.2-h3e7f9b5_0.conda + sha256: ce405171612acef0924a1ff9729d556db7936ad380a81a36325b7df5405a6214 + md5: 6edccad10fc1c76a7a34b9c14efbeaa3 + depends: + - __osx >=11.0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 21470 + timestamp: 1767790900862 +- conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-compression-0.3.2-hcb3a2da_0.conda + sha256: f98fbb797d28de3ae41dbd42590549ee0a2a4e61772f9cc6d1a4fa45d47637de + md5: 0385f2340be1776b513258adaf70e208 + depends: + - vc >=14.3,<15 + - vc14_runtime >=14.44.35208 + - ucrt >=10.0.20348.0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 23087 + timestamp: 1767790877990 +- conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-http-0.10.12-h4bacb7b_1.conda + sha256: c6f910d400ef9034493988e8cd37bd4712e42d85921122bcda4ba68d4614b131 + md5: 7bc920933e5fb225aba86a788164a8f1 + depends: + - libgcc >=14 + - __glibc >=2.17,<3.0.a0 + - aws-c-io >=0.26.3,<0.26.4.0a0 + - aws-c-cal >=0.9.13,<0.9.14.0a0 + - aws-c-compression >=0.3.2,<0.3.3.0a0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 225868 + timestamp: 1774270031584 +- conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-http-0.10.13-h4bacb7b_0.conda + sha256: 38cfc8894db6729770ac18f900296c3f7c20f349a5586a8d8e1a62571fce61d5 + md5: 77f70a9ab785a146dbf66fba00131403 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + - aws-c-compression >=0.3.2,<0.3.3.0a0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + - aws-c-cal >=0.9.13,<0.9.14.0a0 + - aws-c-io >=0.26.3,<0.26.4.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 225826 + timestamp: 1774488399486 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-http-0.10.12-h95cdebe_1.conda + sha256: b25380b43c2c5733dcaac88b075fa286893af1c147ca40d50286df150ace5fb8 + md5: 806ff124512457583d675c62336b1392 + depends: 
+ - __osx >=11.0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + - aws-c-io >=0.26.3,<0.26.4.0a0 + - aws-c-compression >=0.3.2,<0.3.3.0a0 + - aws-c-cal >=0.9.13,<0.9.14.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 172940 + timestamp: 1774270153001 +- conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-http-0.10.12-h612f3e8_1.conda + sha256: dc297fbce04335f5f80b30bcdee1925ed4a0d95e7a2382523870c6b4981ca1b2 + md5: 26af0e9d7853d27e909ce01c287692b4 + depends: + - vc >=14.3,<15 + - vc14_runtime >=14.44.35208 + - ucrt >=10.0.20348.0 + - aws-c-cal >=0.9.13,<0.9.14.0a0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + - aws-c-compression >=0.3.2,<0.3.3.0a0 + - aws-c-io >=0.26.3,<0.26.4.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 207778 + timestamp: 1774270109581 +- conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-io-0.26.3-h692f434_1.conda + sha256: e3e33031d641864128ab11f9b8585ad5beb82fa988fe833bb0767dd01878a371 + md5: 14260392d0b491c537b5e26e9a506fff + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + - aws-c-common >=0.12.6,<0.12.7.0a0 + - s2n >=1.7.2,<1.7.3.0a0 + - aws-c-cal >=0.9.13,<0.9.14.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 181583 + timestamp: 1777471132287 +- conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-io-0.26.3-hc87160b_0.conda + sha256: c66ebb7815949db72bab7c86bf477197e4bc6937c381cf32248bdd1ce496db00 + md5: dde6a3e4fe6bb2ecd2a7050dd1e701fb + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + - aws-c-cal >=0.9.13,<0.9.14.0a0 + - s2n >=1.7.1,<1.7.2.0a0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 181624 + timestamp: 1773868304737 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-io-0.26.3-h4137820_0.conda + sha256: 0e6ba2c8f250f466b9d671d3970e1f7c149c925b79c10fa7778708192a2a7833 + md5: 730d1cbd0973bd7ac150e181d3b572f3 + depends: + - __osx >=11.0 + - aws-c-cal >=0.9.13,<0.9.14.0a0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 177072 + timestamp: 1773868341204 +- conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-io-0.26.3-h0d5b9f9_0.conda + sha256: 3c9d50fb7895df4edd72d177299551608c24d8b0b82db0cf34c8e2bf6644979c + md5: ce36c60ed6b15c8dbb7ccddec4ebf57f + depends: + - vc >=14.3,<15 + - vc14_runtime >=14.44.35208 + - ucrt >=10.0.20348.0 + - aws-c-cal >=0.9.13,<0.9.14.0a0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 182296 + timestamp: 1773868342627 +- conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-s3-0.11.5-h6d69fc9_5.conda + sha256: c15869656f5fbebe27cc5aa58b23831f75d85502d324fedd7ee7e552c79b495d + md5: 4c5c16bf1133dcfe100f33dd4470998e + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + - aws-c-io >=0.26.3,<0.26.4.0a0 + - aws-checksums >=0.2.10,<0.2.11.0a0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + - aws-c-http >=0.10.12,<0.10.13.0a0 + - openssl >=3.5.5,<4.0a0 + - aws-c-auth >=0.10.1,<0.10.2.0a0 + - aws-c-cal >=0.9.13,<0.9.14.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 151340 + timestamp: 1774282148690 +- conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-s3-0.12.2-he6ee468_1.conda + sha256: 4cecb4d595b7cf558087c37b8131cae5204b2c64d75f6b951dc3731d3f872bb8 + md5: 50ae8372984b8b98e056ac8f6b70ab29 + depends: + - libgcc >=14 + - __glibc >=2.17,<3.0.a0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + - aws-checksums >=0.2.10,<0.2.11.0a0 + - 
aws-c-cal >=0.9.13,<0.9.14.0a0 + - aws-c-io >=0.26.3,<0.26.4.0a0 + - openssl >=3.5.6,<4.0a0 + - aws-c-http >=0.10.13,<0.10.14.0a0 + - aws-c-auth >=0.10.1,<0.10.2.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 152657 + timestamp: 1777824812393 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-s3-0.11.5-ha5d16b2_5.conda + sha256: bd8f4ffb8346dd02bda2bc1ae9993ebdb131298b1308cb9e6b1e771b530d9dd5 + md5: f33735fd60f9c4a21c51a0283eb8afc1 + depends: + - __osx >=11.0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + - aws-c-http >=0.10.12,<0.10.13.0a0 + - aws-c-auth >=0.10.1,<0.10.2.0a0 + - aws-c-cal >=0.9.13,<0.9.14.0a0 + - aws-checksums >=0.2.10,<0.2.11.0a0 + - aws-c-io >=0.26.3,<0.26.4.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 129783 + timestamp: 1774282252139 +- conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-s3-0.11.5-h87bd87b_5.conda + sha256: 62367b6d4d8aa1b43fb63e51d779bb829dfdd53d908c1b6700efa23255dd38db + md5: 2d90128559ec4b3c78d1b889b8b13b50 + depends: + - vc >=14.3,<15 + - vc14_runtime >=14.44.35208 + - ucrt >=10.0.20348.0 + - aws-c-http >=0.10.12,<0.10.13.0a0 + - aws-c-io >=0.26.3,<0.26.4.0a0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + - aws-c-auth >=0.10.1,<0.10.2.0a0 + - aws-c-cal >=0.9.13,<0.9.14.0a0 + - aws-checksums >=0.2.10,<0.2.11.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 141733 + timestamp: 1774282227215 +- conda: https://conda.anaconda.org/conda-forge/linux-64/aws-c-sdkutils-0.2.4-h8b1a151_4.conda + sha256: 9d62c5029f6f8219368a8665f0a549da572dc777f52413b7d75609cacdbc02cc + md5: c7e3e08b7b1b285524ab9d74162ce40b + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + - aws-c-common >=0.12.6,<0.12.7.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 59383 + timestamp: 1764610113765 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-c-sdkutils-0.2.4-h16f91aa_4.conda + sha256: 8a4ee03ea6e14d5a498657e5fe96875a133b4263b910c5b60176db1a1a0aaa27 + md5: 658a8236f3f1ebecaaa937b5ccd5d730 + depends: + - __osx >=11.0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 53430 + timestamp: 1764755714246 +- conda: https://conda.anaconda.org/conda-forge/win-64/aws-c-sdkutils-0.2.4-hcb3a2da_4.conda + sha256: c86c30edba7457e04d905c959328142603b62d7d1888aed893b2e21cca9c302c + md5: 3c97faee5be6fd0069410cf2bca71c85 + depends: + - vc >=14.3,<15 + - vc14_runtime >=14.44.35208 + - ucrt >=10.0.20348.0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 56509 + timestamp: 1764610148907 +- conda: https://conda.anaconda.org/conda-forge/linux-64/aws-checksums-0.2.10-h8b1a151_0.conda + sha256: 09472dd5fa4473cffd44741ee4c1112f2c76d7168d1343de53c2ad283dc1efa6 + md5: f8e1bcc5c7d839c5882e94498791be08 + depends: + - libgcc >=14 + - __glibc >=2.17,<3.0.a0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 101435 + timestamp: 1771063496927 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/aws-checksums-0.2.10-h3e7f9b5_0.conda + sha256: 06661bc848b27aa38a85d8018ace8d4f4a3069e22fa0963e2431dc6c0dc30450 + md5: 07f6c5a5238f5deeed6e985826b30de8 + depends: + - __osx >=11.0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 91917 + timestamp: 1771063496505 +- conda: https://conda.anaconda.org/conda-forge/win-64/aws-checksums-0.2.10-hcb3a2da_0.conda + sha256: 
505b2365bbf3c197c9c2e007ba8262bcdaaddc970f84ce67cf73868ca2990989 + md5: 96e950e5007fb691322db578736aba52 + depends: + - vc >=14.3,<15 + - vc14_runtime >=14.44.35208 + - ucrt >=10.0.20348.0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + license: Apache-2.0 + license_family: APACHE + purls: [] + size: 116853 + timestamp: 1771063509650 +- conda: https://conda.anaconda.org/conda-forge/noarch/babel-2.18.0-pyhcf101f3_1.conda + sha256: a14a9ad02101aab25570543a59c5193043b73dc311a25650134ed9e6cb691770 + md5: f1976ce927373500cc19d3c0b2c85177 + depends: + - python >=3.10 + - python + constrains: + - pytz >=2015.7 + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/babel?source=compressed-mapping + size: 7684321 + timestamp: 1772555330347 +- conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.3.0-py314h680f03e_0.conda + noarch: generic + sha256: c31ab719d256bc6f89926131e88ecd0f0c5d003fe8481852c6424f4ec6c7eb29 + md5: a2ac7763a9ac75055b68f325d3255265 + depends: + - python >=3.14 + license: BSD-3-Clause AND MIT AND EPL-2.0 + purls: [] + size: 7514 + timestamp: 1767044983590 +- conda: https://conda.anaconda.org/conda-forge/noarch/backports.zstd-1.4.0-py314h680f03e_0.conda + noarch: generic + sha256: de1755a35258eb1b59f2288559bbf0b76da60bd2fa6cd6f768ead442f85bd666 + md5: b712198b257f378e9bd8cde277218296 + depends: + - python >=3.14 + license: BSD-3-Clause AND MIT AND EPL-2.0 + purls: [] + size: 7546 + timestamp: 1777848733980 +- conda: https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.14.3-pyha770c72_0.conda + sha256: bf1e71c3c0a5b024e44ff928225a0874fc3c3356ec1a0b6fe719108e6d1288f6 + md5: 5267bef8efea4127aacd1f4e1f149b6e + depends: + - python >=3.10 + - soupsieve >=1.2 + - typing-extensions + license: MIT + license_family: MIT + purls: + - pkg:pypi/beautifulsoup4?source=hash-mapping + size: 90399 + timestamp: 1764520638652 +- conda: https://conda.anaconda.org/conda-forge/linux-64/binutils_impl_linux-64-2.45.1-default_hfdba357_101.conda + sha256: 74341b26a2b9475dc14ba3cf12432fcd10a23af285101883e720216d81d44676 + md5: 83aa53cb3f5fc849851a84d777a60551 + depends: + - ld_impl_linux-64 2.45.1 default_hbd61a6d_101 + - sysroot_linux-64 + - zstd >=1.5.7,<1.6.0a0 + license: GPL-3.0-only + license_family: GPL + purls: [] + size: 3744895 + timestamp: 1770267152681 +- conda: https://conda.anaconda.org/conda-forge/linux-64/binutils_impl_linux-64-2.45.1-default_hfdba357_102.conda + sha256: 0a7d405064f53b9d91d92515f1460f7906ee5e8523f3cd8973430e81219f4917 + md5: 8165352fdce2d2025bf884dc0ee85700 + depends: + - ld_impl_linux-64 2.45.1 default_hbd61a6d_102 + - sysroot_linux-64 + - zstd >=1.5.7,<1.6.0a0 + license: GPL-3.0-only + license_family: GPL + purls: [] + size: 3661455 + timestamp: 1774197460085 +- conda: https://conda.anaconda.org/conda-forge/linux-64/binutils_linux-64-2.45.1-default_h4852527_101.conda + sha256: 4826f97d33cbe54459970a1e84500dbe0cccf8326aaf370e707372ae20ec5a47 + md5: dec96579f9a7035a59492bf6ee613b53 + depends: + - binutils_impl_linux-64 2.45.1 default_hfdba357_101 license: GPL-3.0-only license_family: GPL purls: [] size: 36060 timestamp: 1770267177798 +- conda: https://conda.anaconda.org/conda-forge/linux-64/binutils_linux-64-2.45.1-default_h4852527_102.conda + sha256: 78a58d523d072b7f8e591b8f8572822e044b31764ed7e8d170392e7bc6d58339 + md5: 2a307a17309d358c9b42afdd3199ddcc + depends: + - binutils_impl_linux-64 2.45.1 default_hfdba357_102 + license: GPL-3.0-only + license_family: GPL + purls: [] + size: 36304 + timestamp: 1774197485247 - conda: 
https://conda.anaconda.org/conda-forge/noarch/bleach-6.3.0-pyhcf101f3_1.conda sha256: f8ff1f98423674278964a46c93a1766f9e91960d44efd91c6c3ed56a33813f46 md5: 7c5ebdc286220e8021bf55e6384acd67 @@ -4208,6 +5315,16 @@ packages: purls: [] size: 207882 timestamp: 1765214722852 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/c-ares-1.34.6-hc919400_0.conda + sha256: 2995f2aed4e53725e5efbc28199b46bf311c3cab2648fc4f10c2227d6d5fa196 + md5: bcb3cba70cf1eec964a03b4ba7775f01 + depends: + - __osx >=11.0 + license: MIT + license_family: MIT + purls: [] + size: 180327 + timestamp: 1765215064054 - conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.2.25-h4c7d964_0.conda sha256: 37950019c59b99585cee5d30dbc2cc9696ed4e11f5742606a4db1621ed8f94d6 md5: f001e6e220355b7f87403a4d0e5bf1ca @@ -4226,6 +5343,15 @@ packages: purls: [] size: 147413 timestamp: 1772006283803 +- conda: https://conda.anaconda.org/conda-forge/noarch/ca-certificates-2026.4.22-hbd8a1cb_0.conda + sha256: c9dbcc8039a52023660d6d1bbf87594a93dd69c6ac5a2a44323af2c92976728d + md5: e18ad67cf881dcadee8b8d9e2f8e5f73 + depends: + - __unix + license: ISC + purls: [] + size: 131039 + timestamp: 1776865545798 - conda: https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 noarch: python sha256: 561e6660f26c35d137ee150187d89767c988413c978e1b712d53f27ddf70ea17 @@ -4258,6 +5384,16 @@ packages: - pkg:pypi/certifi?source=compressed-mapping size: 151445 timestamp: 1772001170301 +- conda: https://conda.anaconda.org/conda-forge/noarch/certifi-2026.4.22-pyhd8ed1ab_0.conda + sha256: 989db6e5957c4b44fa600c68c681ec2f36a55e48f7c7f1c073d5e91caa8cd878 + md5: 929471569c93acefb30282a22060dcd5 + depends: + - python >=3.10 + license: ISC + purls: + - pkg:pypi/certifi?source=hash-mapping + size: 135656 + timestamp: 1776866680878 - conda: https://conda.anaconda.org/conda-forge/linux-64/cffi-2.0.0-py314h4a8dc5f_1.conda sha256: c6339858a0aaf5d939e00d345c98b99e4558f285942b27232ac098ad17ac7f8e md5: cf45f4278afd6f4e6d03eda0f435d527 @@ -4317,6 +5453,17 @@ packages: - pkg:pypi/charset-normalizer?source=compressed-mapping size: 58510 timestamp: 1773660086450 +- conda: https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-3.4.7-pyhd8ed1ab_0.conda + sha256: 3f9483d62ce24ecd063f8a5a714448445dc8d9e201147c46699fc0033e824457 + md5: a9167b9571f3baa9d448faa2139d1089 + depends: + - python >=3.10 + license: MIT + license_family: MIT + purls: + - pkg:pypi/charset-normalizer?source=hash-mapping + size: 58872 + timestamp: 1775127203018 - pypi: https://files.pythonhosted.org/packages/b7/9f/d73dfb85d7a5b1a56a99adc50f2074029468168c970ff5daeade4ad819e4/choreographer-1.2.1-py3-none-any.whl name: choreographer version: 1.2.1 @@ -4325,6 +5472,15 @@ packages: - logistro>=2.0.1 - simplejson>=3.19.3 requires_python: '>=3.8' +- pypi: https://files.pythonhosted.org/packages/ba/6c/ff8bf52315064dbeb55cb5067e191120a5b2e58bb648d0d34cf7969dc2c2/choreographer-1.3.0-py3-none-any.whl + name: choreographer + version: 1.3.0 + sha256: cea4cb739e4f61625e4b53888a8d3fa1d3bf73948b56753e460ab44da7d8d44f + requires_dist: + - logistro>=2.0.1 + - platformdirs>=4.3.6 + - simplejson>=3.19.3 + requires_python: '>=3.8' - pypi: https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl name: click version: 8.3.1 @@ -4332,6 +5488,13 @@ packages: requires_dist: - colorama ; sys_platform == 'win32' requires_python: '>=3.10' +- pypi: 
https://files.pythonhosted.org/packages/ae/44/c1221527f6a71a01ec6fbad7fa78f1d50dfa02217385cf0fa3eec7087d59/click-8.3.3-py3-none-any.whl + name: click + version: 8.3.3 + sha256: a2bf429bb3033c89fa4936ffb35d5cb471e3719e1f3c8a7c3fff0b8314305613 + requires_dist: + - colorama ; sys_platform == 'win32' + requires_python: '>=3.10' - pypi: https://files.pythonhosted.org/packages/2c/1a/aff8bb287a4b1400f69e09a53bd65de96aa5cee5691925b38731c67fc695/click_default_group-1.2.4-py2.py3-none-any.whl name: click-default-group version: 1.2.4 @@ -4473,6 +5636,17 @@ packages: purls: [] size: 50078 timestamp: 1770674447292 +- conda: https://conda.anaconda.org/conda-forge/noarch/cpython-3.14.4-py314hd8ed1ab_100.conda + noarch: generic + sha256: 40dc224f2b718e5f034efd2332bc315a719063235f63673468d26a24770094ee + md5: f111d4cfaf1fe9496f386bc98ae94452 + depends: + - python >=3.14,<3.15.0a0 + - python_abi * *_cp314 + license: Python-2.0 + purls: [] + size: 49809 + timestamp: 1775614256655 - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-cccl_linux-64-12.9.27-ha770c72_0.conda sha256: 2ee3b9564ca326226e5cda41d11b251482df8e7c757e333d28ec75213c75d126 md5: 87ff6381e33b76e5b9b179a2cdd005ec @@ -4482,6 +5656,15 @@ packages: purls: [] size: 1150650 timestamp: 1746189825236 +- conda: https://conda.anaconda.org/conda-forge/noarch/cuda-cccl_linux-64-13.2.75-ha770c72_0.conda + sha256: afff92110ab09005b43047128d8c56b49ca96ef6425b2de8121ddf8e5d9c52fd + md5: 2a66581b5e2fba97243e6a7b3ea70061 + depends: + - cuda-version >=13.2,<13.3.0a0 + license: LicenseRef-NVIDIA-End-User-License-Agreement + purls: [] + size: 1415553 + timestamp: 1776108312905 - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-crt-dev_linux-64-12.9.86-ha770c72_2.conda sha256: e6257534c4b4b6b8a1192f84191c34906ab9968c92680fa09f639e7846a87304 md5: 79d280de61e18010df5997daea4743df @@ -4491,6 +5674,15 @@ packages: purls: [] size: 94239 timestamp: 1753975242354 +- conda: https://conda.anaconda.org/conda-forge/noarch/cuda-crt-dev_linux-64-13.2.78-ha770c72_0.conda + sha256: 5db93738a2523c418de442427ea0b5fb877fcb517e0d170b1428bdd298bcddfd + md5: 61799994af56d5ab31096a11d62d6be8 + depends: + - cuda-version >=13.2,<13.3.0a0 + license: LicenseRef-NVIDIA-End-User-License-Agreement + purls: [] + size: 97068 + timestamp: 1776121212858 - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-crt-tools-12.9.86-ha770c72_2.conda sha256: 2da9964591af14ba11b2379bed01d56e7185260ee0998d1a939add7fb752db45 md5: 503a94e20d2690d534d676a764a1852c @@ -4500,6 +5692,15 @@ packages: purls: [] size: 29138 timestamp: 1753975252445 +- conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-crt-tools-13.2.78-ha770c72_0.conda + sha256: db0517510b960a14a0efd50881ea43954b27abdbbc782a60174872585ee4d207 + md5: 2edadf855598e2f3e3e323d900fd27ab + depends: + - cuda-version >=13.2,<13.3.0a0 + license: LicenseRef-NVIDIA-End-User-License-Agreement + purls: [] + size: 30452 + timestamp: 1776121224148 - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-cudart-12.9.79-h5888daf_0.conda sha256: 57d1294ecfaf9dc8cdb5fc4be3e63ebc7614538bddb5de53cfd9b1b7de43aed5 md5: cb15315d19b58bd9cd424084e58ad081 @@ -4513,6 +5714,19 @@ packages: purls: [] size: 23242 timestamp: 1749218416505 +- conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-cudart-13.2.75-hecca717_0.conda + sha256: 633bc9ba458a12a20a42776bf3fa25cecfddc65a22e4ed207fe09b9adcd9de58 + md5: 9b7dcd83f8a965efcf7377dc54203619 + depends: + - __glibc >=2.17,<3.0.a0 + - cuda-cudart_linux-64 13.2.75 h376f20c_0 + - cuda-version 
>=13.2,<13.3.0a0 + - libgcc >=14 + - libstdcxx >=14 + license: LicenseRef-NVIDIA-End-User-License-Agreement + purls: [] + size: 24542 + timestamp: 1776110472025 - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-cudart-dev-12.9.79-h5888daf_0.conda sha256: 04d8235cb3cb3510c0492c3515a9d1a6053b50ef39be42b60cafb05044b5f4c6 md5: ba38a7c3b4c14625de45784b773f0c71 @@ -4528,6 +5742,21 @@ packages: purls: [] size: 23687 timestamp: 1749218464010 +- conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-cudart-dev-13.2.75-hecca717_0.conda + sha256: c11c338b24c37ae05d39ae752a661b199c6530f2f189be1cc718b23485cd8626 + md5: 145b05176a16bf8ffa64defccde19162 + depends: + - __glibc >=2.17,<3.0.a0 + - cuda-cudart 13.2.75 hecca717_0 + - cuda-cudart-dev_linux-64 13.2.75 h376f20c_0 + - cuda-cudart-static 13.2.75 hecca717_0 + - cuda-version >=13.2,<13.3.0a0 + - libgcc >=14 + - libstdcxx >=14 + license: LicenseRef-NVIDIA-End-User-License-Agreement + purls: [] + size: 25017 + timestamp: 1776110522210 - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-cudart-dev_linux-64-12.9.79-h3f2d84a_0.conda sha256: ffe86ed0144315b276f18020d836c8ef05bf971054cf7c3eb167af92494080d5 md5: 86e40eb67d83f1a58bdafdd44e5a77c6 @@ -4540,6 +5769,18 @@ packages: purls: [] size: 389140 timestamp: 1749218427266 +- conda: https://conda.anaconda.org/conda-forge/noarch/cuda-cudart-dev_linux-64-13.2.75-h376f20c_0.conda + sha256: feb6d90170dbdbbc873d065f17c55845b03e1bd132d5727ba16c9dc5048c3a98 + md5: 0104d270d83f6c3f6b4f8f761da37bf4 + depends: + - cuda-cccl_linux-64 + - cuda-cudart-static_linux-64 + - cuda-cudart_linux-64 + - cuda-version >=13.2,<13.3.0a0 + license: LicenseRef-NVIDIA-End-User-License-Agreement + purls: [] + size: 398384 + timestamp: 1776110485442 - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-cudart-static-12.9.79-h5888daf_0.conda sha256: 6261e1d9af80e1ec308e3e5e2ff825d189ef922d24093beaf6efca12e67ce060 md5: d3c4ac48f4967f09dd910d9c15d40c81 @@ -4553,6 +5794,19 @@ packages: purls: [] size: 23283 timestamp: 1749218442382 +- conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-cudart-static-13.2.75-hecca717_0.conda + sha256: bb55bbd1d5961953889abef8c1c2ec011eff0c4d3dd92f46d06fd4176285f430 + md5: 42208a65f539b7dca4c900681649f599 + depends: + - __glibc >=2.17,<3.0.a0 + - cuda-cudart-static_linux-64 13.2.75 h376f20c_0 + - cuda-version >=13.2,<13.3.0a0 + - libgcc >=14 + - libstdcxx >=14 + license: LicenseRef-NVIDIA-End-User-License-Agreement + purls: [] + size: 24532 + timestamp: 1776110498692 - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-cudart-static_linux-64-12.9.79-h3f2d84a_0.conda sha256: d435f8a19b59b52ce460ee3a6bfd877288a0d1d645119a6ba60f1c3627dc5032 md5: b87bf315d81218dd63eb46cc1eaef775 @@ -4562,6 +5816,15 @@ packages: purls: [] size: 1148889 timestamp: 1749218381225 +- conda: https://conda.anaconda.org/conda-forge/noarch/cuda-cudart-static_linux-64-13.2.75-h376f20c_0.conda + sha256: f4e8c80fe897a426bb6a413b685d7e16eaf52cdbbcf3fa73cf24c994da82b0ef + md5: 6e8700fbcdf3a916d4494db9811d955a + depends: + - cuda-version >=13.2,<13.3.0a0 + license: LicenseRef-NVIDIA-End-User-License-Agreement + purls: [] + size: 1105717 + timestamp: 1776110435801 - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-cudart_linux-64-12.9.79-h3f2d84a_0.conda sha256: 6cde0ace2b995b49d0db2eefb7bc30bf00ffc06bb98ef7113632dec8f8907475 md5: 64508631775fbbf9eca83c84b1df0cae @@ -4571,6 +5834,15 @@ packages: purls: [] size: 197249 timestamp: 1749218394213 +- conda: 
https://conda.anaconda.org/conda-forge/noarch/cuda-cudart_linux-64-13.2.75-h376f20c_0.conda + sha256: cd03c67b2005e2e74ff278f6f8b17ca7d6f18cf43fb00775833669508d301a83 + md5: ff98f2b9b87eb8b3a4b36745d3d5b93e + depends: + - cuda-version >=13.2,<13.3.0a0 + license: LicenseRef-NVIDIA-End-User-License-Agreement + purls: [] + size: 203339 + timestamp: 1776110448238 - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-driver-dev_linux-64-12.9.79-h3f2d84a_0.conda sha256: a15574d966e73135a79d5e6570c87e13accdb44bd432449b5deea71644ad442c md5: d411828daa36ac84eab210ba3bbe5a64 @@ -4580,6 +5852,15 @@ packages: purls: [] size: 37714 timestamp: 1749218405324 +- conda: https://conda.anaconda.org/conda-forge/noarch/cuda-driver-dev_linux-64-13.2.75-h376f20c_0.conda + sha256: adf85566baf27c8b05785807d6a21b3bb60264cd1b198a83cef4aac84dd74021 + md5: a3fcf07a7dba934172ad464931773730 + depends: + - cuda-version >=13.2,<13.3.0a0 + license: LicenseRef-NVIDIA-End-User-License-Agreement + purls: [] + size: 39432 + timestamp: 1776110460213 - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-nvcc-12.9.86-hcdd1206_6.conda sha256: f7c5de6b1f0f463f73c78cc73439027cdd5cb94fb4ce099116969812973cabcb md5: 02289b10ac97bac35ad1add086c5072a @@ -4591,6 +5872,17 @@ packages: purls: [] size: 25472 timestamp: 1771619493470 +- conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-nvcc-13.2.78-hcdd1206_0.conda + sha256: cccfb670f1df05d877e5bda117f7904037980d43f54cc0466efb27130b02e660 + md5: 08c7ce98e7422c620d653b8dd0b860bc + depends: + - cuda-nvcc_linux-64 13.2.78.* + - gcc_linux-64 + - gxx_linux-64 + license: LicenseRef-NVIDIA-End-User-License-Agreement + purls: [] + size: 25484 + timestamp: 1776142712078 - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-nvcc-dev_linux-64-12.9.86-he91c749_2.conda sha256: a1672a34439a72869de9e011e935d41b62fc8dfb1a2700e85ed8a7a129b79981 md5: 19d4e090217f0ea89d30bedb7461c048 @@ -4606,6 +5898,21 @@ packages: purls: [] size: 28121 timestamp: 1753975535813 +- conda: https://conda.anaconda.org/conda-forge/noarch/cuda-nvcc-dev_linux-64-13.2.78-he91c749_0.conda + sha256: 2ec469887c35e379ae0c14f45a96579a8509b0e61977416e9b1cdcca31fea006 + md5: 74d5f18e2461a1b54c438af4b88986d4 + depends: + - cuda-crt-dev_linux-64 13.2.78 ha770c72_0 + - cuda-nvvm-dev_linux-64 13.2.78 ha770c72_0 + - cuda-version >=13.2,<13.3.0a0 + - libgcc >=6 + - libnvptxcompiler-dev_linux-64 13.2.78 ha770c72_0 + constrains: + - gcc_impl_linux-64 >=6,<16.0a0 + license: LicenseRef-NVIDIA-End-User-License-Agreement + purls: [] + size: 29428 + timestamp: 1776121471034 - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-nvcc-impl-12.9.86-h85509e4_2.conda sha256: 961cf20d411b7685cd744e6c6ed35efea547d095c62151d6f3053d9931bb994d md5: 67458d2685e7503933efa550f3ee40f3 @@ -4623,6 +5930,23 @@ packages: purls: [] size: 27215 timestamp: 1753975546846 +- conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-nvcc-impl-13.2.78-h85509e4_0.conda + sha256: b72a26f00d79592e018228b460539d98c8d1fceefcd68ac4d38dbd7b352b9c48 + md5: 4b65d9b967d7814742a7f62052872a7c + depends: + - cuda-cudart >=13.2.75,<14.0a0 + - cuda-cudart-dev + - cuda-nvcc-dev_linux-64 13.2.78 he91c749_0 + - cuda-nvcc-tools 13.2.78 he02047a_0 + - cuda-nvvm-impl 13.2.78 h4bc722e_0 + - cuda-version >=13.2,<13.3.0a0 + - libnvptxcompiler-dev 13.2.78 ha770c72_0 + constrains: + - gcc_impl_linux-64 >=6,<16.0a0 + license: LicenseRef-NVIDIA-End-User-License-Agreement + purls: [] + size: 28552 + timestamp: 1776121483085 - conda: 
https://conda.anaconda.org/conda-forge/linux-64/cuda-nvcc-tools-12.9.86-he02047a_2.conda sha256: 0e849be7b5e4832ca218ec2c48a9ba3a15a984f629e2e54f38a53f4f57220341 md5: dc256c9864c2e8e9c817fbca1c84a4bc @@ -4639,6 +5963,22 @@ packages: purls: [] size: 27380012 timestamp: 1753975454194 +- conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-nvcc-tools-13.2.78-he02047a_0.conda + sha256: 31d97d74c7c81c22efe5b6d223df6ce6bb2a9c33ce50a6746191002b56a4deb2 + md5: 542607fe8f59653d0f22363c6fe9a689 + depends: + - __glibc >=2.17,<3.0.a0 + - cuda-crt-tools 13.2.78 ha770c72_0 + - cuda-nvvm-tools 13.2.78 h4bc722e_0 + - cuda-version >=13.2,<13.3.0a0 + - libgcc >=12 + - libstdcxx >=12 + constrains: + - gcc_impl_linux-64 >=6,<16.0a0 + license: LicenseRef-NVIDIA-End-User-License-Agreement + purls: [] + size: 34050410 + timestamp: 1776121396530 - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-nvcc_linux-64-12.9.86-he0b4e1d_6.conda sha256: c506221dafb7cfd081f7d12d01d8e8ab9b29adfcc7d69d61fedd3232174e4016 md5: 359d05bc3ec5d3a467eb558e3844aea2 @@ -4654,6 +5994,21 @@ packages: purls: [] size: 27575 timestamp: 1771619492974 +- conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-nvcc_linux-64-13.2.78-hb2fc203_0.conda + sha256: 03239914b7f53a2aed3fcc9f6b8b0c7b06b6b85341636d191b62aa439a43a091 + md5: 230423a2b6214c07c6d415976a96bc94 + depends: + - __glibc >=2.17,<3.0.a0 + - cuda-cudart-dev_linux-64 13.2.* + - cuda-driver-dev_linux-64 13.2.* + - cuda-nvcc-dev_linux-64 13.2.78.* + - cuda-nvcc-impl 13.2.78.* + - cuda-nvcc-tools 13.2.78.* + - sysroot_linux-64 >=2.17,<3.0a0 + license: LicenseRef-NVIDIA-End-User-License-Agreement + purls: [] + size: 27594 + timestamp: 1776142711212 - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-nvvm-dev_linux-64-12.9.86-ha770c72_2.conda sha256: 522722dcaffd133e0c7500c69dc70e21ac34d6762dcbaabfe847439f944028f0 md5: 7b386291414c7eea113d25ac28a33772 @@ -4663,6 +6018,15 @@ packages: purls: [] size: 27096 timestamp: 1753975261562 +- conda: https://conda.anaconda.org/conda-forge/noarch/cuda-nvvm-dev_linux-64-13.2.78-ha770c72_0.conda + sha256: 13ce27aa4f3427eae9a6cc7402f08d8515604a56829825fcf9c0de1a1034309e + md5: 531411c4a10ef8d4d045695edf86e4da + depends: + - cuda-version >=13.2,<13.3.0a0 + license: LicenseRef-NVIDIA-End-User-License-Agreement + purls: [] + size: 28442 + timestamp: 1776121235103 - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-nvvm-impl-12.9.86-h4bc722e_2.conda sha256: f4d34556174e4faa9d374ba2244707082870e1bbc1bb441ad3d9d2cea37da6af md5: 82125dd3c0c4aa009faa00e2829b93d8 @@ -4674,6 +6038,17 @@ packages: purls: [] size: 21425520 timestamp: 1753975283188 +- conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-nvvm-impl-13.2.78-h4bc722e_0.conda + sha256: 944d132f61f240131abff67646da4040ae585a1f43c6b38fabebb6cc075a7c16 + md5: 5e1021b4c73e795deabbf35ed1317dcb + depends: + - __glibc >=2.17,<3.0.a0 + - cuda-version >=13.2,<13.3.0a0 + - libgcc >=12 + license: LicenseRef-NVIDIA-End-User-License-Agreement + purls: [] + size: 22205958 + timestamp: 1776121258973 - conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-nvvm-tools-12.9.86-h4bc722e_2.conda sha256: 45f5e881ed0d973132a5475a0b5c066db6e748ef3a831a14dba8374b252e0067 md5: f9af26e4079adcd72688a8e8dbecb229 @@ -4685,6 +6060,17 @@ packages: purls: [] size: 24246736 timestamp: 1753975332907 +- conda: https://conda.anaconda.org/conda-forge/linux-64/cuda-nvvm-tools-13.2.78-h4bc722e_0.conda + sha256: 57636a84b88434c4aca3a3585ee9bb9eb7da6d4a53c3ad034b33f03bd8838f08 + md5: 
1b3e427ba98cd5d2a4df1c0e9f573023 + depends: + - __glibc >=2.17,<3.0.a0 + - cuda-version >=13.2,<13.3.0a0 + - libgcc >=12 + license: LicenseRef-NVIDIA-End-User-License-Agreement + purls: [] + size: 25988023 + timestamp: 1776121296869 - conda: https://conda.anaconda.org/conda-forge/noarch/cuda-version-12.9-h4f385c5_3.conda sha256: 5f5f428031933f117ff9f7fcc650e6ea1b3fef5936cf84aa24af79167513b656 md5: b6d5d7f1c171cbd228ea06b556cfa859 @@ -4695,6 +6081,16 @@ packages: purls: [] size: 21578 timestamp: 1746134436166 +- conda: https://conda.anaconda.org/conda-forge/noarch/cuda-version-13.2-he2cc418_3.conda + sha256: 64aebe8ccb3a2c3ff446d3c0c0e88ef4fdb069a5732c03539bf3a37243c4c679 + md5: 45676e3dd76b30ec613f1f822d450eff + constrains: + - __cuda >=13 + - cudatoolkit 13.2|13.2.* + license: LicenseRef-NVIDIA-End-User-License-Agreement + purls: [] + size: 21908 + timestamp: 1773093709154 - conda: https://conda.anaconda.org/conda-forge/noarch/cycler-0.12.1-pyhcf101f3_2.conda sha256: bb47aec5338695ff8efbddbc669064a3b10fe34ad881fb8ad5d64fbfa6910ed1 md5: 4c2a8fef270f6c69591889b93f9f55c1 @@ -4858,6 +6254,11 @@ packages: - pathlib2>=2.3,<3.0 ; python_full_version < '3.4' - six>=1.12,<2.0 requires_python: '>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*' +- pypi: https://files.pythonhosted.org/packages/72/9f/485516087cd8c44183aaf9ab850247a28e2e4a42a4d62eab77c21f673450/flatten_dict-0.5.0-py3-none-any.whl + name: flatten-dict + version: 0.5.0 + sha256: c4bd2010052e4d33241433720d054322403fa7ad914fdc5cb1b31a713d4c561e + requires_python: '>=3.10,<4.0' - conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.62.0-pyh7db6752_0.conda sha256: ed4462f6e49b8dea4e45f7294cca576a38cf4fc41e04bbcd95f9cf55be7776b9 md5: 049f68f9c90f00069c748cd6fb7bfb55 @@ -4874,6 +6275,22 @@ packages: - pkg:pypi/fonttools?source=compressed-mapping size: 837910 timestamp: 1773137210630 +- conda: https://conda.anaconda.org/conda-forge/noarch/fonttools-4.62.1-pyh7db6752_0.conda + sha256: fa77109df37580ce0933d4e6c5a44b2f0c192af2f8e503bfdbfb3b49a8b8e538 + md5: 14cf1ac7a1e29553c6918f7860aab6d8 + depends: + - brotli + - munkres + - python >=3.10 + - unicodedata2 >=15.1.0 + track_features: + - fonttools_no_compile + license: MIT + license_family: MIT + purls: + - pkg:pypi/fonttools?source=hash-mapping + size: 840293 + timestamp: 1776708212291 - conda: https://conda.anaconda.org/conda-forge/noarch/fqdn-1.5.1-pyhd8ed1ab_1.conda sha256: 2509992ec2fd38ab27c7cdb42cf6cadc566a1cc0d1021a2673475d9fa87c6276 md5: d3549fd50d450b6d9e7dddff25dd2110 @@ -4894,8 +6311,18 @@ packages: - libfreetype6 2.14.2 h73754d4_0 license: GPL-2.0-only OR FTL purls: [] - size: 174292 - timestamp: 1772757205296 + size: 174292 + timestamp: 1772757205296 +- conda: https://conda.anaconda.org/conda-forge/linux-64/freetype-2.14.3-ha770c72_0.conda + sha256: c934c385889c7836f034039b43b05ccfa98f53c900db03d8411189892ced090b + md5: 8462b5322567212beeb025f3519fb3e2 + depends: + - libfreetype 2.14.3 ha770c72_0 + - libfreetype6 2.14.3 h73754d4_0 + license: GPL-2.0-only OR FTL + purls: [] + size: 173839 + timestamp: 1774298173462 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/freetype-2.14.2-hce30654_0.conda sha256: 3c02ecdbfd94d25721811f51d0f400bf705005a728011e19db9975a8985e1021 md5: ca730d8e7d1de1f71013edfef0e08f13 @@ -5024,6 +6451,114 @@ packages: - zstandard ; python_full_version < '3.14' and extra == 'test-full' - tqdm ; extra == 'tqdm' requires_python: '>=3.10' +- pypi: 
https://files.pythonhosted.org/packages/d5/0c/043d5e551459da400957a1395e0febbf771446ff34291afcbe3d8be2a279/fsspec-2026.4.0-py3-none-any.whl + name: fsspec + version: 2026.4.0 + sha256: 11ef7bb35dab8a394fde6e608221d5cf3e8499401c249bebaeaad760a1a8dec2 + requires_dist: + - adlfs ; extra == 'abfs' + - adlfs ; extra == 'adl' + - pyarrow>=1 ; extra == 'arrow' + - dask ; extra == 'dask' + - distributed ; extra == 'dask' + - pre-commit ; extra == 'dev' + - ruff>=0.5 ; extra == 'dev' + - numpydoc ; extra == 'doc' + - sphinx ; extra == 'doc' + - sphinx-design ; extra == 'doc' + - sphinx-rtd-theme ; extra == 'doc' + - yarl ; extra == 'doc' + - dropbox ; extra == 'dropbox' + - dropboxdrivefs ; extra == 'dropbox' + - requests ; extra == 'dropbox' + - adlfs ; extra == 'full' + - aiohttp!=4.0.0a0,!=4.0.0a1 ; extra == 'full' + - dask ; extra == 'full' + - distributed ; extra == 'full' + - dropbox ; extra == 'full' + - dropboxdrivefs ; extra == 'full' + - fusepy ; extra == 'full' + - gcsfs>2024.2.0 ; extra == 'full' + - libarchive-c ; extra == 'full' + - ocifs ; extra == 'full' + - panel ; extra == 'full' + - paramiko ; extra == 'full' + - pyarrow>=1 ; extra == 'full' + - pygit2 ; extra == 'full' + - requests ; extra == 'full' + - s3fs>2024.2.0 ; extra == 'full' + - smbprotocol ; extra == 'full' + - tqdm ; extra == 'full' + - fusepy ; extra == 'fuse' + - gcsfs>2024.2.0 ; extra == 'gcs' + - pygit2 ; extra == 'git' + - requests ; extra == 'github' + - gcsfs ; extra == 'gs' + - panel ; extra == 'gui' + - pyarrow>=1 ; extra == 'hdfs' + - aiohttp!=4.0.0a0,!=4.0.0a1 ; extra == 'http' + - libarchive-c ; extra == 'libarchive' + - ocifs ; extra == 'oci' + - s3fs>2024.2.0 ; extra == 's3' + - paramiko ; extra == 'sftp' + - smbprotocol ; extra == 'smb' + - paramiko ; extra == 'ssh' + - aiohttp!=4.0.0a0,!=4.0.0a1 ; extra == 'test' + - numpy ; extra == 'test' + - pytest ; extra == 'test' + - pytest-asyncio!=0.22.0 ; extra == 'test' + - pytest-benchmark ; extra == 'test' + - pytest-cov ; extra == 'test' + - pytest-mock ; extra == 'test' + - pytest-recording ; extra == 'test' + - pytest-rerunfailures ; extra == 'test' + - requests ; extra == 'test' + - aiobotocore>=2.5.4,<3.0.0 ; extra == 'test-downstream' + - dask[dataframe,test] ; extra == 'test-downstream' + - moto[server]>4,<5 ; extra == 'test-downstream' + - pytest-timeout ; extra == 'test-downstream' + - xarray ; extra == 'test-downstream' + - adlfs ; extra == 'test-full' + - aiohttp!=4.0.0a0,!=4.0.0a1 ; extra == 'test-full' + - backports-zstd ; python_full_version < '3.14' and extra == 'test-full' + - cloudpickle ; extra == 'test-full' + - dask ; extra == 'test-full' + - distributed ; extra == 'test-full' + - dropbox ; extra == 'test-full' + - dropboxdrivefs ; extra == 'test-full' + - fastparquet ; extra == 'test-full' + - fusepy ; extra == 'test-full' + - gcsfs ; extra == 'test-full' + - jinja2 ; extra == 'test-full' + - kerchunk ; extra == 'test-full' + - libarchive-c ; extra == 'test-full' + - lz4 ; extra == 'test-full' + - notebook ; extra == 'test-full' + - numpy ; extra == 'test-full' + - ocifs ; extra == 'test-full' + - pandas<3.0.0 ; extra == 'test-full' + - panel ; extra == 'test-full' + - paramiko ; extra == 'test-full' + - pyarrow ; extra == 'test-full' + - pyarrow>=1 ; extra == 'test-full' + - pyftpdlib ; extra == 'test-full' + - pygit2 ; extra == 'test-full' + - pytest ; extra == 'test-full' + - pytest-asyncio!=0.22.0 ; extra == 'test-full' + - pytest-benchmark ; extra == 'test-full' + - pytest-cov ; extra == 'test-full' + - pytest-mock ; extra == 
'test-full' + - pytest-recording ; extra == 'test-full' + - pytest-rerunfailures ; extra == 'test-full' + - python-snappy ; extra == 'test-full' + - requests ; extra == 'test-full' + - smbprotocol ; extra == 'test-full' + - tqdm ; extra == 'test-full' + - urllib3 ; extra == 'test-full' + - zarr ; extra == 'test-full' + - zstandard ; python_full_version < '3.14' and extra == 'test-full' + - tqdm ; extra == 'tqdm' + requires_python: '>=3.10' - conda: https://conda.anaconda.org/conda-forge/linux-64/gcc_impl_linux-64-14.3.0-hbdf3cc3_18.conda sha256: 3b31a273b806c6851e16e9cf63ef87cae28d19be0df148433f3948e7da795592 md5: 30bb690150536f622873758b0e8d6712 @@ -5041,6 +6576,23 @@ packages: purls: [] size: 76302378 timestamp: 1771378056505 +- conda: https://conda.anaconda.org/conda-forge/linux-64/gcc_impl_linux-64-15.2.0-he0086c7_19.conda + sha256: a48400ec4b73369c1c59babe4ad35821b63a88bba0ec40a80cea5f8c53a26b83 + md5: e3be72048d3c4a78b8e27ec48ba06252 + depends: + - binutils_impl_linux-64 >=2.45 + - libgcc >=15.2.0 + - libgcc-devel_linux-64 15.2.0 hcc6f6b0_119 + - libgomp >=15.2.0 + - libsanitizer 15.2.0 h90f66d4_19 + - libstdcxx >=15.2.0 + - libstdcxx-devel_linux-64 15.2.0 hd446a21_119 + - sysroot_linux-64 + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + purls: [] + size: 81180457 + timestamp: 1778269124617 - conda: https://conda.anaconda.org/conda-forge/linux-64/gcc_linux-64-14.3.0-h298d278_21.conda sha256: 27ad0cd10dccffca74e20fb38c9f8643ff8fce56eee260bf89fa257d5ab0c90a md5: 1403ed5fe091bd7442e4e8a229d14030 @@ -5053,6 +6605,18 @@ packages: purls: [] size: 28946 timestamp: 1770908213807 +- conda: https://conda.anaconda.org/conda-forge/linux-64/gcc_linux-64-15.2.0-h7be306e_24.conda + sha256: 7e1a77123819f9e6c15439df9a987c66235c53e4c6d12a9ab3cea883258214df + md5: 81f96ca8673107e2da4a6b9e3807cf74 + depends: + - gcc_impl_linux-64 15.2.0.* + - binutils_linux-64 + - sysroot_linux-64 + license: BSD-3-Clause + license_family: BSD + purls: [] + size: 29081 + timestamp: 1777144726741 - conda: https://conda.anaconda.org/conda-forge/linux-64/gmp-6.3.0-hac33072_2.conda sha256: 309cf4f04fec0c31b6771a5809a1909b4b3154a2208f52351e1ada006f4c750c md5: c94a5994ef49749880a8139cf9afcbe1 @@ -5080,6 +6644,23 @@ packages: purls: [] size: 2030992 timestamp: 1768686277371 +- conda: https://conda.anaconda.org/conda-forge/linux-64/gnutls-3.8.13-h18acefa_0.conda + sha256: dbdbb714064914281c755650bc54e1855412e7e2f4c99ad171b5123ed704b2b1 + md5: 7c3de21891993e89aabdadaa603ed835 + depends: + - __glibc >=2.17,<3.0.a0 + - gmp >=6.3.0,<7.0a0 + - libgcc >=14 + - libidn2 >=2,<3.0a0 + - libstdcxx >=14 + - libtasn1 >=4.21.0,<5.0a0 + - nettle >=3.10.1,<3.11.0a0 + - p11-kit >=0.26.2,<0.27.0a0 + license: LGPL-2.1-or-later + license_family: LGPL + purls: [] + size: 2054535 + timestamp: 1778044634746 - pypi: https://files.pythonhosted.org/packages/d2/d8/09bfa816572a4d83bccd6750df1926f79158b1c36c5f73786e26dbe4ee38/greenlet-3.3.2-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl name: greenlet version: 3.3.2 @@ -5102,6 +6683,17 @@ packages: - psutil ; extra == 'test' - setuptools ; extra == 'test' requires_python: '>=3.10' +- pypi: https://files.pythonhosted.org/packages/a3/59/1bd6d7428d6ed9106efbb8c52310c60fd04f6672490f452aeaa3829aa436/greenlet-3.5.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + name: greenlet + version: 3.5.0 + sha256: 8f52a464e4ed91780bdfbbdd2b97197f3accaa629b98c200f4dffada759f3ae7 + requires_dist: + - sphinx ; extra == 'docs' + - furo ; extra == 'docs' + - objgraph ; extra == 
'test' + - psutil ; extra == 'test' + - setuptools ; extra == 'test' + requires_python: '>=3.10' - conda: https://conda.anaconda.org/conda-forge/linux-64/gxx_impl_linux-64-14.3.0-h2185e75_18.conda sha256: 38ffca57cc9c264d461ac2ce9464a9d605e0f606d92d831de9075cb0d95fc68a md5: 6514b3a10e84b6a849e1b15d3753eb22 @@ -5115,6 +6707,19 @@ packages: purls: [] size: 14566100 timestamp: 1771378271421 +- conda: https://conda.anaconda.org/conda-forge/linux-64/gxx_impl_linux-64-15.2.0-hda75c37_19.conda + sha256: 3f5288346b9fe233352443b3c2e31f1fde845e39d3e96475fc05ec2e782af158 + md5: 9d41f3899b512199af0a4bb939b83e21 + depends: + - gcc_impl_linux-64 15.2.0 he0086c7_19 + - libstdcxx-devel_linux-64 15.2.0 hd446a21_119 + - sysroot_linux-64 + - tzdata + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + purls: [] + size: 16356816 + timestamp: 1778269332159 - conda: https://conda.anaconda.org/conda-forge/linux-64/gxx_linux-64-14.3.0-he467f4b_21.conda sha256: 1e07c197e0779fa9105e59cd55a835ded96bfde59eb169439736a89b27b48e5d md5: 7b51f4ff82eeb1f386bfee20a7bed3ed @@ -5128,6 +6733,19 @@ packages: purls: [] size: 27503 timestamp: 1770908213813 +- conda: https://conda.anaconda.org/conda-forge/linux-64/gxx_linux-64-15.2.0-he30e93d_24.conda + sha256: 9b40af502e2471ceff9a04a860165d8a6fac659c07dc115ed8357e1a77e2cbe7 + md5: 0787df5104bd63d2186dd3902244e7c3 + depends: + - gxx_impl_linux-64 15.2.0.* + - gcc_linux-64 ==15.2.0 h7be306e_24 + - binutils_linux-64 + - sysroot_linux-64 + license: BSD-3-Clause + license_family: BSD + purls: [] + size: 27602 + timestamp: 1777144726741 - conda: https://conda.anaconda.org/conda-forge/noarch/h11-0.16.0-pyhcf101f3_1.conda sha256: 96cac6573fd35ae151f4d6979bab6fbc90cb6b1fb99054ba19eb075da9822fcb md5: b8993c19b0c32a2f7b66cbb58ca27069 @@ -5155,6 +6773,151 @@ packages: - pkg:pypi/h2?source=hash-mapping size: 95967 timestamp: 1756364871835 +- conda: https://conda.anaconda.org/conda-forge/linux-64/h5py-3.16.0-nompi_py314hddf7a69_102.conda + sha256: 48e18f20bc1ff15433299dd77c20a4160eb29572eea799ae5a73632c6c3d7dfd + md5: d93afa30018997705dd04513eeb5ac0f + depends: + - __glibc >=2.17,<3.0.a0 + - cached-property + - hdf5 >=2.1.0,<3.0a0 + - libgcc >=14 + - numpy >=1.23,<3 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/h5py?source=hash-mapping + size: 1345557 + timestamp: 1775581268685 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/h5py-3.16.0-nompi_py314h658a3ac_102.conda + sha256: 0762ed080bf45ca475da96796a8883a6c719603c44fa9b07a5883785649a4a0f + md5: ab9a6c652fd25407c9cf67b9b6b87496 + depends: + - __osx >=11.0 + - cached-property + - hdf5 >=2.1.0,<3.0a0 + - numpy >=1.23,<3 + - python >=3.14,<3.15.0a0 + - python >=3.14,<3.15.0a0 *_cp314 + - python_abi 3.14.* *_cp314 + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/h5py?source=hash-mapping + size: 1203956 + timestamp: 1775583125726 +- conda: https://conda.anaconda.org/conda-forge/win-64/h5py-3.16.0-nompi_py314h02517ec_102.conda + sha256: 5ee88f1f691829d2430761a26a690c3d880e7cd41e40a4057131360a8904e0bd + md5: 19bdd6358ce2be9ef29f92b1564db61d + depends: + - cached-property + - hdf5 >=2.1.0,<3.0a0 + - numpy >=1.23,<3 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 + - ucrt >=10.0.20348.0 + - vc >=14.3,<15 + - vc14_runtime >=14.44.35208 + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/h5py?source=hash-mapping + size: 1101679 + timestamp: 1775582027560 +- conda: 
https://conda.anaconda.org/conda-forge/linux-64/hdf5-2.1.0-nompi_h87a9417_105.conda + sha256: beb8a2fb18924ca7b5b82cfb50f008f882f577daef2c00ed88022abea35fec76 + md5: 0d0595612fa229dddb5fc565c260a11f + depends: + - __glibc >=2.17,<3.0.a0 + - aws-c-auth >=0.10.1,<0.10.2.0a0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + - aws-c-http >=0.10.13,<0.10.14.0a0 + - aws-c-io >=0.26.3,<0.26.4.0a0 + - aws-c-s3 >=0.12.2,<0.12.3.0a0 + - aws-c-sdkutils >=0.2.4,<0.2.5.0a0 + - libaec >=1.1.5,<2.0a0 + - libcurl >=8.20.0,<9.0a0 + - libgcc >=14 + - libgfortran + - libgfortran5 >=14.3.0 + - libstdcxx >=14 + - libzlib >=1.3.2,<2.0a0 + - openssl >=3.5.6,<4.0a0 + license: BSD-3-Clause + license_family: BSD + purls: [] + size: 4713397 + timestamp: 1777861887131 +- conda: https://conda.anaconda.org/conda-forge/linux-64/hdf5-2.1.0-nompi_hd4fcb43_104.conda + sha256: c6ff674a4a5a237fcf748fed8f64e79df54b42189986e705f35ba64dc6603235 + md5: 1d92558abd05cea0577f83a5eca38733 + depends: + - __glibc >=2.17,<3.0.a0 + - aws-c-auth >=0.10.1,<0.10.2.0a0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + - aws-c-http >=0.10.12,<0.10.13.0a0 + - aws-c-io >=0.26.3,<0.26.4.0a0 + - aws-c-s3 >=0.11.5,<0.11.6.0a0 + - aws-c-sdkutils >=0.2.4,<0.2.5.0a0 + - libaec >=1.1.5,<2.0a0 + - libcurl >=8.19.0,<9.0a0 + - libgcc >=14 + - libgfortran + - libgfortran5 >=14.3.0 + - libstdcxx >=14 + - libzlib >=1.3.2,<2.0a0 + - openssl >=3.5.5,<4.0a0 + license: BSD-3-Clause + license_family: BSD + purls: [] + size: 4138489 + timestamp: 1775243967708 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/hdf5-2.1.0-nompi_hc95e3eb_104.conda + sha256: 5b96accf983be97718fbfaddd6706591d7ef6511b4ccdac8a09f6b9899d1b284 + md5: e5390fd4a3b964a3ed619480df918294 + depends: + - __osx >=11.0 + - aws-c-auth >=0.10.1,<0.10.2.0a0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + - aws-c-http >=0.10.12,<0.10.13.0a0 + - aws-c-io >=0.26.3,<0.26.4.0a0 + - aws-c-s3 >=0.11.5,<0.11.6.0a0 + - aws-c-sdkutils >=0.2.4,<0.2.5.0a0 + - libaec >=1.1.5,<2.0a0 + - libcurl >=8.19.0,<9.0a0 + - libcxx >=19 + - libgfortran + - libgfortran5 >=14.3.0 + - libzlib >=1.3.2,<2.0a0 + - openssl >=3.5.5,<4.0a0 + license: BSD-3-Clause + license_family: BSD + purls: [] + size: 3418702 + timestamp: 1775244340092 +- conda: https://conda.anaconda.org/conda-forge/win-64/hdf5-2.1.0-nompi_hd96b29f_104.conda + sha256: ad660bf000e2a905ebdc8c297d9b3851ac48834284b673e655adda490425f652 + md5: 37c1890c40a1514fa92ba13e27d5b1c3 + depends: + - aws-c-auth >=0.10.1,<0.10.2.0a0 + - aws-c-common >=0.12.6,<0.12.7.0a0 + - aws-c-http >=0.10.12,<0.10.13.0a0 + - aws-c-io >=0.26.3,<0.26.4.0a0 + - aws-c-s3 >=0.11.5,<0.11.6.0a0 + - aws-c-sdkutils >=0.2.4,<0.2.5.0a0 + - libaec >=1.1.5,<2.0a0 + - libcurl >=8.19.0,<9.0a0 + - libzlib >=1.3.2,<2.0a0 + - openssl >=3.5.5,<4.0a0 + - ucrt >=10.0.20348.0 + - vc >=14.3,<15 + - vc14_runtime >=14.44.35208 + license: BSD-3-Clause + license_family: BSD + purls: [] + size: 2564561 + timestamp: 1775244102272 - conda: https://conda.anaconda.org/conda-forge/noarch/hpack-4.1.0-pyhd8ed1ab_0.conda sha256: 6ad78a180576c706aabeb5b4c8ceb97c0cb25f1e112d76495bff23e3779948ba md5: 0a802cb9888dd14eeefc611f05c40b6e @@ -5240,6 +7003,18 @@ packages: - pkg:pypi/idna?source=hash-mapping size: 50721 timestamp: 1760286526795 +- conda: https://conda.anaconda.org/conda-forge/noarch/idna-3.13-pyhcf101f3_0.conda + sha256: 9ab620e6f64bb67737bd7bc1ad6f480770124e304c6710617aba7fe60b089f48 + md5: fb7130c190f9b4ec91219840a05ba3ac + depends: + - python >=3.10 + - python + license: BSD-3-Clause + license_family: BSD + purls: + - 
pkg:pypi/idna?source=hash-mapping + size: 59038 + timestamp: 1776947141407 - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.7.0-pyhe01879c_1.conda sha256: c18ab120a0613ada4391b15981d86ff777b5690ca461ea7e9e49531e8f374745 md5: 63ccfdc3a3ce25b027b8767eb722fca8 @@ -5253,6 +7028,19 @@ packages: - pkg:pypi/importlib-metadata?source=hash-mapping size: 34641 timestamp: 1747934053147 +- conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.8.0-pyhcf101f3_0.conda + sha256: 82ab2a0d91ca1e7e63ab6a4939356667ef683905dea631bc2121aa534d347b16 + md5: 080594bf4493e6bae2607e65390c520a + depends: + - python >=3.10 + - zipp >=3.20 + - python + license: Apache-2.0 + license_family: APACHE + purls: + - pkg:pypi/importlib-metadata?source=hash-mapping + size: 34387 + timestamp: 1773931568510 - pypi: https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl name: iniconfig version: 2.3.0 @@ -5395,6 +7183,30 @@ packages: - pkg:pypi/ipython?source=compressed-mapping size: 648197 timestamp: 1772790149194 +- conda: https://conda.anaconda.org/conda-forge/noarch/ipython-9.13.0-pyh53cf698_0.conda + sha256: a0af49948a1842dfd15a0b0b2fd56c94ddbd07e07a6c8b4bc70d43015eafaff0 + md5: 73e9657cd19605740d21efb14d8d0cb9 + depends: + - __unix + - decorator >=5.1.0 + - ipython_pygments_lexers >=1.0.0 + - jedi >=0.18.2 + - matplotlib-inline >=0.1.6 + - prompt-toolkit >=3.0.41,<3.1.0 + - psutil >=7 + - pygments >=2.14.0 + - python >=3.11 + - stack_data >=0.6.0 + - traitlets >=5.13.0 + - typing_extensions >=4.6 + - pexpect >4.6 + - python + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/ipython?source=hash-mapping + size: 651632 + timestamp: 1777038396606 - conda: https://conda.anaconda.org/conda-forge/noarch/ipython_pygments_lexers-1.1.1-pyhd8ed1ab_0.conda sha256: 894682a42a7d659ae12878dbcb274516a7031bbea9104e92f8e88c1f2765a104 md5: bd80ba060603cc228d9d81c257093119 @@ -5449,6 +7261,36 @@ packages: - kubernetes ; extra == 'k8s' - xprof ; extra == 'xprof' requires_python: '>=3.11' +- pypi: https://files.pythonhosted.org/packages/70/aa/dfac6d72cc35bc07e7587115b6946e333ef4ccb2e6cd26ecf639438c5d26/jax-0.10.0-py3-none-any.whl + name: jax + version: 0.10.0 + sha256: 76c42ba163c8db3dc2e449e225b888c0edfb623ded31efdc96d85e0fda1d26e8 + requires_dist: + - jaxlib<=0.10.0,>=0.10.0 + - ml-dtypes>=0.5.0 + - numpy>=2.0 + - opt-einsum + - scipy>=1.14 + - jaxlib==0.10.0 ; extra == 'minimum-jaxlib' + - jaxlib==0.9.2 ; extra == 'ci' + - jaxlib<=0.10.0,>=0.10.0 ; extra == 'tpu' + - libtpu==0.0.40.* ; extra == 'tpu' + - requests ; extra == 'tpu' + - jaxlib<=0.10.0,>=0.10.0 ; extra == 'cuda' + - jax-cuda12-plugin[with-cuda]<=0.10.0,>=0.10.0 ; extra == 'cuda' + - jaxlib<=0.10.0,>=0.10.0 ; extra == 'cuda12' + - jax-cuda12-plugin[with-cuda]<=0.10.0,>=0.10.0 ; extra == 'cuda12' + - jaxlib<=0.10.0,>=0.10.0 ; extra == 'cuda13' + - jax-cuda13-plugin[with-cuda]<=0.10.0,>=0.10.0 ; extra == 'cuda13' + - jaxlib<=0.10.0,>=0.10.0 ; extra == 'cuda12-local' + - jax-cuda12-plugin<=0.10.0,>=0.10.0 ; extra == 'cuda12-local' + - jaxlib<=0.10.0,>=0.10.0 ; extra == 'cuda13-local' + - jax-cuda13-plugin<=0.10.0,>=0.10.0 ; extra == 'cuda13-local' + - jaxlib<=0.10.0,>=0.10.0 ; extra == 'rocm7-local' + - jax-rocm7-plugin==0.10.0.* ; extra == 'rocm7-local' + - kubernetes ; extra == 'k8s' + - xprof ; extra == 'xprof' + requires_python: '>=3.11' - pypi: 
https://files.pythonhosted.org/packages/a7/2c/8ddb471091b46de99bba7eaa7f4e3983f9c8e74e310e585ff08915ce8b7a/jax_cuda12_pjrt-0.9.1-py3-none-manylinux_2_27_x86_64.whl name: jax-cuda12-pjrt version: 0.9.1 @@ -5472,6 +7314,30 @@ packages: - nvidia-cuda-nvrtc-cu12>=12.1.55 ; sys_platform == 'linux' and extra == 'with-cuda' - nvidia-nvshmem-cu12>=3.2.5 ; sys_platform == 'linux' and extra == 'with-cuda' requires_python: '>=3.11' +- pypi: https://files.pythonhosted.org/packages/21/98/77f15d81fd0637da454e453c8456d4a2b5c8b2e66823b4237ee8689152cf/jax_cuda13_pjrt-0.10.0-py3-none-manylinux_2_27_x86_64.whl + name: jax-cuda13-pjrt + version: 0.10.0 + sha256: 848d6ae3e663d040c53e902ea9d380a902bfa5e7da881053cec408360036fa7a +- pypi: https://files.pythonhosted.org/packages/8f/2b/5c63c29d155afdf1d7827f8c04efe8cac47fc6783d8c53959e43de879dcc/jax_cuda13_plugin-0.10.0-cp314-cp314-manylinux_2_27_x86_64.whl + name: jax-cuda13-plugin + version: 0.10.0 + sha256: 09dff8dadac0334dccd43a79b00bb81f27df74ab05656b78d10ef784a29ea5f6 + requires_dist: + - jax-cuda13-pjrt==0.10.0 + - nvidia-cublas>=13.0.0.19 ; sys_platform == 'linux' and extra == 'with-cuda' + - nvidia-cuda-cupti>=13.0.48 ; sys_platform == 'linux' and extra == 'with-cuda' + - nvidia-cuda-nvcc>=13.0.48 ; sys_platform == 'linux' and extra == 'with-cuda' + - nvidia-cuda-runtime>=13.0.48 ; sys_platform == 'linux' and extra == 'with-cuda' + - nvidia-cudnn-cu13>=9.12.0.46,<10.0 ; sys_platform == 'linux' and extra == 'with-cuda' + - nvidia-cufft>=12.0.0.15 ; sys_platform == 'linux' and extra == 'with-cuda' + - nvidia-cusolver>=12.0.3.29 ; sys_platform == 'linux' and extra == 'with-cuda' + - nvidia-cusparse>=12.6.2.49 ; sys_platform == 'linux' and extra == 'with-cuda' + - nvidia-nccl-cu13>=2.27.7 ; sys_platform == 'linux' and extra == 'with-cuda' + - nvidia-nvjitlink>=13.0.39 ; sys_platform == 'linux' and extra == 'with-cuda' + - nvidia-cuda-nvrtc>=13.0.48 ; sys_platform == 'linux' and extra == 'with-cuda' + - nvidia-nvshmem-cu13>=3.3.20 ; sys_platform == 'linux' and extra == 'with-cuda' + - nvidia-nvvm ; extra == 'with-cuda' + requires_python: '>=3.11' - pypi: https://files.pythonhosted.org/packages/54/0d/a8e27c1c434e489883c1182bd52de27775b8a78013de62e6eabf80991df5/jaxlib-0.9.1-cp314-cp314-win_amd64.whl name: jaxlib version: 0.9.1 @@ -5499,6 +7365,15 @@ packages: - numpy>=2.0 - ml-dtypes>=0.5.0 requires_python: '>=3.11' +- pypi: https://files.pythonhosted.org/packages/a1/8e/b2a08ffc51c93842de71f7f988865cebfa7f43d6721957812dc8cc8b9d40/jaxlib-0.10.0-cp314-cp314-manylinux_2_27_x86_64.whl + name: jaxlib + version: 0.10.0 + sha256: 2a42cf04c0f88bc03b150a17fa7ddbb2f40e096667ec8a1b840ed87913e6e735 + requires_dist: + - scipy>=1.14 + - numpy>=2.0 + - ml-dtypes>=0.5.0 + requires_python: '>=3.11' - conda: https://conda.anaconda.org/conda-forge/noarch/jedi-0.19.2-pyhd8ed1ab_1.conda sha256: 92c4d217e2dc68983f724aa983cca5464dcb929c566627b26a2511159667dba8 md5: a4f4c5dc9b80bc50e0d3dc4e6e8f1bd9 @@ -5523,11 +7398,18 @@ packages: - pkg:pypi/jinja2?source=compressed-mapping size: 120685 timestamp: 1764517220861 -- pypi: https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl - name: joblib - version: 1.5.3 - sha256: 5fc3c5039fc5ca8c0276333a188bbd59d6b7ab37fe6632daa76bc7f9ec18e713 - requires_python: '>=3.9' +- conda: https://conda.anaconda.org/conda-forge/noarch/joblib-1.5.3-pyhd8ed1ab_0.conda + sha256: 301539229d7be6420c084490b8145583291123f0ce6b92f56be5948a2c83a379 + md5: 615de2a4d97af50c350e5cf160149e77 
+ depends: + - python >=3.10 + - setuptools + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/joblib?source=hash-mapping + size: 226448 + timestamp: 1765794135253 - conda: https://conda.anaconda.org/conda-forge/noarch/json5-0.13.0-pyhd8ed1ab_0.conda sha256: ba03ca5a6db38d9f48bd30172e8c512dea7a686a5c7701c6fcdb7b3023dae2ad md5: 8d5f66ebf832c4ce28d5c37a0e76605c @@ -5539,6 +7421,17 @@ packages: - pkg:pypi/json5?source=hash-mapping size: 34017 timestamp: 1767325114901 +- conda: https://conda.anaconda.org/conda-forge/noarch/json5-0.14.0-pyhd8ed1ab_0.conda + sha256: 9daa95bd164c8fa23b3ab196e906ef806141d749eddce2a08baa064f722d25fa + md5: 1269891272187518a0a75c286f7d0bbf + depends: + - python >=3.10 + license: Apache-2.0 + license_family: APACHE + purls: + - pkg:pypi/json5?source=hash-mapping + size: 34731 + timestamp: 1774655440045 - conda: https://conda.anaconda.org/conda-forge/noarch/jsonpointer-3.0.0-pyhcf101f3_3.conda sha256: 1a1328476d14dfa8b84dbacb7f7cd7051c175498406dc513ca6c679dc44f3981 md5: cd2214824e36b0180141d422aba01938 @@ -5551,6 +7444,18 @@ packages: - pkg:pypi/jsonpointer?source=hash-mapping size: 13967 timestamp: 1765026384757 +- conda: https://conda.anaconda.org/conda-forge/noarch/jsonpointer-3.1.1-pyhcf101f3_0.conda + sha256: a3d10301b6ff399ba1f3d39e443664804a3d28315a4fb81e745b6817845f70ae + md5: 89bf346df77603055d3c8fe5811691e6 + depends: + - python >=3.10 + - python + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/jsonpointer?source=hash-mapping + size: 14190 + timestamp: 1774311356147 - conda: https://conda.anaconda.org/conda-forge/noarch/jsonschema-4.26.0-pyhcf101f3_0.conda sha256: db973a37d75db8e19b5f44bbbdaead0c68dde745407f281e2a7fe4db74ec51d7 md5: ada41c863af263cc4c5fcbaff7c3e4dc @@ -5612,6 +7517,19 @@ packages: - markdown ; extra == 'docs' - pandas ; extra == 'docs' requires_python: '>=3.9' +- pypi: https://files.pythonhosted.org/packages/b5/83/205e7af4153d9690c3cb94fa9cea670c0d26ce7f022aaa589a9e136f1491/jupyter_book-2.1.5-py3-none-any.whl + name: jupyter-book + version: 2.1.5 + sha256: 19eedc70bb8d5ed5de0f7f3cb8de312da3a50900dcdda9b0c5a9704410a7758d + requires_dist: + - ipykernel + - jupyter-core + - jupyter-server + - platformdirs>=4.2.2 + - nodeenv>=1.9.1 + - markdown ; extra == 'docs' + - pandas ; extra == 'docs' + requires_python: '>=3.9' - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter-lsp-2.3.0-pyhcf101f3_0.conda sha256: 897ad2e2c2335ef3c2826d7805e16002a1fd0d509b4ae0bc66617f0e0ff07bc2 md5: 62b7c96c6cd77f8173cc5cada6a9acaa @@ -5626,6 +7544,20 @@ packages: - pkg:pypi/jupyter-lsp?source=hash-mapping size: 60377 timestamp: 1756388269267 +- conda: https://conda.anaconda.org/conda-forge/noarch/jupyter-lsp-2.3.1-pyhcf101f3_0.conda + sha256: 3766e2ae59641c172cec8a821528bfa6bf9543ffaaeb8b358bfd5259dcf18e4e + md5: 0c3b465ceee138b9c39279cc02e5c4a0 + depends: + - importlib-metadata >=4.8.3 + - jupyter_server >=1.1.2 + - python >=3.10 + - python + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/jupyter-lsp?source=hash-mapping + size: 61633 + timestamp: 1775136333147 - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_client-8.8.0-pyhcf101f3_0.conda sha256: e402bd119720862a33229624ec23645916a7d47f30e1711a4af9e005162b84f3 md5: 8a3d6d0523f66cf004e563a50d9392b3 @@ -5699,6 +7631,26 @@ packages: - pkg:pypi/jupyter-events?source=hash-mapping size: 24306 timestamp: 1770937604863 +- conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_events-0.12.1-pyhcf101f3_0.conda + sha256: 
c7edb5682c6316a95ad781dccb1b6589cd2ec0bf94f23c21152974eb0363b5d7 + md5: bf42ee94c750c0b2e7e998b79ac299ea + depends: + - jsonschema-with-format-nongpl >=4.18.0 + - packaging + - python >=3.10 + - python-json-logger >=2.0.4 + - pyyaml >=5.3 + - referencing + - rfc3339-validator + - rfc3986-validator >=0.1.1 + - traitlets >=5.3 + - python + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/jupyter-events?source=hash-mapping + size: 24002 + timestamp: 1776861872237 - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_server-2.17.0-pyhcf101f3_0.conda sha256: 74c4e642be97c538dae1895f7052599dfd740d8bd251f727bce6453ce8d6cd9a md5: d79a87dcfa726bcea8e61275feed6f83 @@ -5729,6 +7681,36 @@ packages: - pkg:pypi/jupyter-server?source=hash-mapping size: 347094 timestamp: 1755870522134 +- conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_server-2.18.2-pyhcf101f3_0.conda + sha256: 04fb8ea7749f67abaf76df6257bf86688e1389ceed55eb4fb0176fd2e882dbd6 + md5: 5ee7945accf0f215ddd6055d25d7cd83 + depends: + - anyio >=3.1.0 + - argon2-cffi >=21.1 + - jinja2 >=3.0.3 + - jupyter_client >=7.4.4 + - jupyter_core >=4.12,!=5.0.* + - jupyter_events >=0.11.0 + - jupyter_server_terminals >=0.4.4 + - nbconvert-core >=6.4.4 + - nbformat >=5.3.0 + - overrides >=5.0 + - packaging >=22.0 + - prometheus_client >=0.9 + - python >=3.10 + - pyzmq >=24 + - send2trash >=1.8.2 + - terminado >=0.8.3 + - tornado >=6.2.0 + - traitlets >=5.6.0 + - websocket-client >=1.7 + - python + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/jupyter-server?source=hash-mapping + size: 360522 + timestamp: 1778060967727 - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_server_terminals-0.5.4-pyhcf101f3_0.conda sha256: 5eda79ed9f53f590031d29346abd183051263227dd9ee667b5ca1133ce297654 md5: 7b8bace4943e0dc345fc45938826f2b8 @@ -5767,6 +7749,31 @@ packages: - pkg:pypi/jupyterlab?source=compressed-mapping size: 8245973 timestamp: 1773240966438 +- conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab-4.5.7-pyhd8ed1ab_0.conda + sha256: b85befad5ba1f50c0cc042a2ffb26441d13ffc2f18572dc20d3541476da0c7b9 + md5: 2ffe77234070324e763a6eddabb5f467 + depends: + - async-lru >=1.0.0 + - httpx >=0.25.0,<1 + - ipykernel >=6.5.0,!=6.30.0 + - jinja2 >=3.0.3 + - jupyter-lsp >=2.0.0 + - jupyter_core + - jupyter_server >=2.4.0,<3 + - jupyterlab_server >=2.28.0,<3 + - notebook-shim >=0.2 + - packaging + - python >=3.10 + - setuptools >=41.1.0 + - tomli >=1.2.2 + - tornado >=6.2.0 + - traitlets + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/jupyterlab?source=hash-mapping + size: 8861204 + timestamp: 1777483115382 - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.3.0-pyhd8ed1ab_2.conda sha256: dc24b900742fdaf1e077d9a3458fd865711de80bca95fe3c6d46610c532c6ef0 md5: fd312693df06da3578383232528c468d @@ -5811,6 +7818,16 @@ packages: - packaging - pytest-timeout>=2.4.0 requires_python: '>=3.8' +- pypi: https://files.pythonhosted.org/packages/9e/b9/a6d8bb7d228940f01885bd9f327ab7f9d366a9be775c4bf366bf9d9477ae/kaleido-1.3.0-py3-none-any.whl + name: kaleido + version: 1.3.0 + sha256: 52714dfd38e8f2a114831826200c40bb10d0ca0c11d4272f3f48ad499cd8f8ea + requires_dist: + - choreographer>=1.3.0 + - logistro>=1.0.8 + - orjson>=3.10.15 + - packaging + requires_python: '>=3.8' - conda: https://conda.anaconda.org/conda-forge/noarch/kernel-headers_linux-64-4.18.0-he073ed8_9.conda sha256: 41557eeadf641de6aeae49486cef30d02a6912d8da98585d687894afd65b356a md5: 
86d9cba083cd041bfbf242a01a7a1999 @@ -5943,6 +7960,19 @@ packages: purls: [] size: 249959 timestamp: 1768184673131 +- conda: https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.19.1-h0c24ade_0.conda + sha256: eb89c6c39f2f6a93db55723dbb2f6bba8c8e63e6312bf1abf13e6e9ff45849c8 + md5: f92f984b558e6e6204014b16d212b271 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + - libjpeg-turbo >=3.1.4.1,<4.0a0 + - libtiff >=4.7.1,<4.8.0a0 + license: MIT + license_family: MIT + purls: [] + size: 251086 + timestamp: 1778079286384 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/lcms2-2.18-hdfa7624_0.conda sha256: d768da024ab74a4b30642401877fa914a68bdc238667f16b1ec2e0e98b2451a6 md5: 6631a7bd2335bb9699b1dbc234b19784 @@ -5982,6 +8012,19 @@ packages: purls: [] size: 725507 timestamp: 1770267139900 +- conda: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.45.1-default_hbd61a6d_102.conda + sha256: 3d584956604909ff5df353767f3a2a2f60e07d070b328d109f30ac40cd62df6c + md5: 18335a698559cdbcd86150a48bf54ba6 + depends: + - __glibc >=2.17,<3.0.a0 + - zstd >=1.5.7,<1.6.0a0 + constrains: + - binutils_impl_linux-64 2.45.1 + license: GPL-3.0-only + license_family: GPL + purls: [] + size: 728002 + timestamp: 1774197446916 - conda: https://conda.anaconda.org/conda-forge/linux-64/lerc-4.1.0-hdb68285_0.conda sha256: f84cb54782f7e9cea95e810ea8fef186e0652d0fa73d3009914fa2c1262594e1 md5: a752488c68f2e7c456bcbd8f16eec275 @@ -6017,6 +8060,41 @@ packages: purls: [] size: 172395 timestamp: 1773113455582 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libaec-1.1.5-h088129d_0.conda + sha256: 822e4ae421a7e9c04e841323526321185f6659222325e1a9aedec811c686e688 + md5: 86f7414544ae606282352fa1e116b41f + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + - libstdcxx >=14 + license: BSD-2-Clause + license_family: BSD + purls: [] + size: 36544 + timestamp: 1769221884824 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/libaec-1.1.5-h8664d51_0.conda + sha256: af9cd8db11eb719e38a3340c88bb4882cf19b5b4237d93845224489fc2a13b46 + md5: 13e6d9ae0efbc9d2e9a01a91f4372b41 + depends: + - __osx >=11.0 + - libcxx >=19 + license: BSD-2-Clause + license_family: BSD + purls: [] + size: 30390 + timestamp: 1769222133373 +- conda: https://conda.anaconda.org/conda-forge/win-64/libaec-1.1.5-haf901d7_0.conda + sha256: e54c08964262c73671d9e80e400333e59c617e0b454476ad68933c0c458156c8 + md5: 43b6385cfad52a7083f2c41984eb4e91 + depends: + - ucrt >=10.0.20348.0 + - vc >=14.3,<15 + - vc14_runtime >=14.44.35208 + license: BSD-2-Clause + license_family: BSD + purls: [] + size: 34463 + timestamp: 1769221960556 - conda: https://conda.anaconda.org/conda-forge/linux-64/libarchive-3.8.6-gpl_hc2c16d8_100.conda sha256: 69ea8da58658ad26cb64fb0bfccd8a3250339811f0b57c6b8a742e5e51bacf70 md5: 981d372c31a23e1aa9965d4e74d085d5 @@ -6037,6 +8115,26 @@ packages: purls: [] size: 887139 timestamp: 1773243188979 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libarchive-3.8.7-gpl_hc2c16d8_100.conda + sha256: 2071a3eb03a868effef273eee8bb7baed6ee9fb2fb94421e9958dcf48ab2c599 + md5: dbeb5c8321cb2408d406a3da16a0ff0d + depends: + - __glibc >=2.17,<3.0.a0 + - bzip2 >=1.0.8,<2.0a0 + - libgcc >=14 + - liblzma >=5.8.3,<6.0a0 + - libxml2 + - libxml2-16 >=2.14.6 + - libzlib >=1.3.2,<2.0a0 + - lz4-c >=1.10.0,<1.11.0a0 + - lzo >=2.10,<3.0a0 + - openssl >=3.5.6,<4.0a0 + - zstd >=1.5.7,<1.6.0a0 + license: BSD-2-Clause + license_family: BSD + purls: [] + size: 891114 + timestamp: 1776096017113 - conda: 
https://conda.anaconda.org/conda-forge/linux-64/libblas-3.11.0-5_h4a7cf45_openblas.conda build_number: 5 sha256: 18c72545080b86739352482ba14ba2c4815e19e26a7417ca21a95b76ec8da24c @@ -6055,6 +8153,24 @@ packages: purls: [] size: 18213 timestamp: 1765818813880 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libblas-3.11.0-6_h4a7cf45_openblas.conda + build_number: 6 + sha256: 7bfe936dbb5db04820cf300a9cc1f5ee8d5302fc896c2d66e30f1ee2f20fbfd6 + md5: 6d6d225559bfa6e2f3c90ee9c03d4e2e + depends: + - libopenblas >=0.3.32,<0.3.33.0a0 + - libopenblas >=0.3.32,<1.0a0 + constrains: + - blas 2.306 openblas + - liblapack 3.11.0 6*_openblas + - liblapacke 3.11.0 6*_openblas + - libcblas 3.11.0 6*_openblas + - mkl <2026 + license: BSD-3-Clause + license_family: BSD + purls: [] + size: 18621 + timestamp: 1774503034895 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libblas-3.11.0-5_h51639a9_openblas.conda build_number: 5 sha256: 620a6278f194dcabc7962277da6835b1e968e46ad0c8e757736255f5ddbfca8d @@ -6209,6 +8325,21 @@ packages: purls: [] size: 18194 timestamp: 1765818837135 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libcblas-3.11.0-6_h0358290_openblas.conda + build_number: 6 + sha256: 57edafa7796f6fa3ebbd5367692dd4c7f552be42109c2dd1a7c89b55089bf374 + md5: 36ae340a916635b97ac8a0655ace2a35 + depends: + - libblas 3.11.0 6_h4a7cf45_openblas + constrains: + - blas 2.306 openblas + - liblapack 3.11.0 6*_openblas + - liblapacke 3.11.0 6*_openblas + license: BSD-3-Clause + license_family: BSD + purls: [] + size: 18622 + timestamp: 1774503050205 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcblas-3.11.0-5_hb0561ab_openblas.conda build_number: 5 sha256: 38809c361bbd165ecf83f7f05fae9b791e1baa11e4447367f38ae1327f402fc0 @@ -6256,6 +8387,54 @@ packages: purls: [] size: 466704 timestamp: 1773218522665 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libcurl-8.20.0-hcf29cc6_0.conda + sha256: 75963a5dd913311f59a35dbd307592f4fa754c4808aff9c33edb430c415e38eb + md5: c3cc2864f82a944bc90a7beb4d3b0e88 + depends: + - __glibc >=2.17,<3.0.a0 + - krb5 >=1.22.2,<1.23.0a0 + - libgcc >=14 + - libnghttp2 >=1.68.1,<2.0a0 + - libssh2 >=1.11.1,<2.0a0 + - libzlib >=1.3.2,<2.0a0 + - openssl >=3.5.6,<4.0a0 + - zstd >=1.5.7,<1.6.0a0 + license: curl + license_family: MIT + purls: [] + size: 468706 + timestamp: 1777461492876 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcurl-8.19.0-hd5a2499_0.conda + sha256: c4d581b067fa60f9dc0e1c5f18b756760ff094a03139e6b206eb98d185ae2bb1 + md5: 9fc7771fc8104abed9119113160be15a + depends: + - __osx >=11.0 + - krb5 >=1.22.2,<1.23.0a0 + - libnghttp2 >=1.67.0,<2.0a0 + - libssh2 >=1.11.1,<2.0a0 + - libzlib >=1.3.1,<2.0a0 + - openssl >=3.5.5,<4.0a0 + - zstd >=1.5.7,<1.6.0a0 + license: curl + license_family: MIT + purls: [] + size: 399616 + timestamp: 1773219210246 +- conda: https://conda.anaconda.org/conda-forge/win-64/libcurl-8.19.0-h8206538_0.conda + sha256: 6b2143ba5454b399dab4471e9e1d07352a2f33b569975e6b8aedc2d9bf51cbb0 + md5: ed181e29a7ebf0f60b84b98d6140a340 + depends: + - krb5 >=1.22.2,<1.23.0a0 + - libssh2 >=1.11.1,<2.0a0 + - libzlib >=1.3.1,<2.0a0 + - ucrt >=10.0.20348.0 + - vc >=14.3,<15 + - vc14_runtime >=14.44.35208 + license: curl + license_family: MIT + purls: [] + size: 392543 + timestamp: 1773218585056 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcxx-22.1.1-h55c6f16_0.conda sha256: 3c8142cdd3109c250a926c492ec45bc954697b288e5d1154ada95272ffa21be8 md5: 7a290d944bc0c481a55baf33fa289deb @@ -6334,6 +8513,14 @@ packages: 
purls: [] size: 112766 timestamp: 1702146165126 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/libev-4.33-h93a5062_2.conda + sha256: 95cecb3902fbe0399c3a7e67a5bed1db813e5ab0e22f4023a5e0f722f2cc214f + md5: 36d33e440c31857372a72137f78bacf5 + license: BSD-2-Clause + license_family: BSD + purls: [] + size: 107458 + timestamp: 1702146414478 - conda: https://conda.anaconda.org/conda-forge/linux-64/libexpat-2.7.4-hecca717_0.conda sha256: d78f1d3bea8c031d2f032b760f36676d87929b18146351c4464c66b0869df3f5 md5: e7f7ce06ec24cfcfb9e36d28cf82ba57 @@ -6347,6 +8534,19 @@ packages: purls: [] size: 76798 timestamp: 1771259418166 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libexpat-2.8.0-hecca717_0.conda + sha256: ea33c40977ea7a2c3658c522230058395bc2ee0d89d99f0711390b6a1ee80d12 + md5: a3b390520c563d78cc58974de95a03e5 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + constrains: + - expat 2.8.0.* + license: MIT + license_family: MIT + purls: [] + size: 77241 + timestamp: 1777846112704 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libexpat-2.7.4-hf6b4638_0.conda sha256: 03887d8080d6a8fe02d75b80929271b39697ecca7628f0657d7afaea87761edf md5: a92e310ae8dfc206ff449f362fc4217f @@ -6415,6 +8615,15 @@ packages: purls: [] size: 8035 timestamp: 1772757210108 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libfreetype-2.14.3-ha770c72_0.conda + sha256: 38f014a7129e644636e46064ecd6b1945e729c2140e21d75bb476af39e692db2 + md5: e289f3d17880e44b633ba911d57a321b + depends: + - libfreetype6 >=2.14.3 + license: GPL-2.0-only OR FTL + purls: [] + size: 8049 + timestamp: 1774298163029 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libfreetype-2.14.2-hce30654_0.conda sha256: 6061ef5321b8e697d5577d8dfe7a4c75bfe3e706c956d0d84bfec6bea3ed9f77 md5: a3a53232936b55ffea76806aefe19e8b @@ -6447,6 +8656,20 @@ packages: purls: [] size: 386316 timestamp: 1772757193822 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libfreetype6-2.14.3-h73754d4_0.conda + sha256: 16f020f96da79db1863fcdd8f2b8f4f7d52f177dd4c58601e38e9182e91adf1d + md5: fb16b4b69e3f1dcfe79d80db8fd0c55d + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + - libpng >=1.6.55,<1.7.0a0 + - libzlib >=1.3.2,<2.0a0 + constrains: + - freetype >=2.14.3 + license: GPL-2.0-only OR FTL + purls: [] + size: 384575 + timestamp: 1774298162622 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libfreetype6-2.14.2-hdfa99f5_0.conda sha256: 24dd0e0bee56e87935f885929f67659f1d3b8a01e7546568de2919cffd9e2e36 md5: e726e134a392ae5d7bafa6cc4a3d5725 @@ -6489,6 +8712,20 @@ packages: purls: [] size: 1041788 timestamp: 1771378212382 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-15.2.0-he0feb66_19.conda + sha256: 8e0a3b5e41272e5678499b5dfc4cddb673f9e935de01eb0767ce857001229f46 + md5: 57736f29cc2b0ec0b6c2952d3f101b6a + depends: + - __glibc >=2.17,<3.0.a0 + - _openmp_mutex >=4.5 + constrains: + - libgcc-ng ==15.2.0=*_19 + - libgomp 15.2.0 he0feb66_19 + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + purls: [] + size: 1041084 + timestamp: 1778269013026 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libgcc-15.2.0-hcbb3090_18.conda sha256: 1d9c4f35586adb71bcd23e31b68b7f3e4c4ab89914c26bed5f2859290be5560e md5: 92df6107310b1fff92c4cc84f0de247b @@ -6527,6 +8764,16 @@ packages: purls: [] size: 3084533 timestamp: 1771377786730 +- conda: https://conda.anaconda.org/conda-forge/noarch/libgcc-devel_linux-64-15.2.0-hcc6f6b0_119.conda + sha256: 
38a557eba305468ac1f90ac85e50d8defd76141cb0b8a43b2fc1aca71dd5d5f2 + md5: 683fcb168e1df9a21fa80d5aa2d9330b + depends: + - __unix + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + purls: [] + size: 3095909 + timestamp: 1778268932148 - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-15.2.0-h69a702a_18.conda sha256: e318a711400f536c81123e753d4c797a821021fb38970cebfb3f454126016893 md5: d5e96b1ed75ca01906b3d2469b4ce493 @@ -6537,6 +8784,16 @@ packages: purls: [] size: 27526 timestamp: 1771378224552 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-15.2.0-h69a702a_19.conda + sha256: 9dcf54adfaa5e861123c2da4f2f0451a685464ea7e5a41ad91cf67b31d658d98 + md5: 331ee9b72b9dff570d56b1302c5ab37d + depends: + - libgcc 15.2.0 he0feb66_19 + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + purls: [] + size: 27694 + timestamp: 1778269016987 - conda: https://conda.anaconda.org/conda-forge/linux-64/libgfortran-15.2.0-h69a702a_18.conda sha256: d2c9fad338fd85e4487424865da8e74006ab2e2475bd788f624d7a39b2a72aee md5: 9063115da5bc35fdc3e1002e69b9ef6e @@ -6549,6 +8806,18 @@ packages: purls: [] size: 27523 timestamp: 1771378269450 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libgfortran-15.2.0-h69a702a_19.conda + sha256: 561a42758ef25b9ce308c4e2cf56daee4f06138385a17e29a492cd928e00be6f + md5: 42bf7eca1a951735fa06c0e3c0d5c8e6 + depends: + - libgfortran5 15.2.0 h68bc16d_19 + constrains: + - libgfortran-ng ==15.2.0=*_19 + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + purls: [] + size: 27655 + timestamp: 1778269042954 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libgfortran-15.2.0-h07b0088_18.conda sha256: 63f89087c3f0c8621c5c89ecceec1e56e5e1c84f65fc9c5feca33a07c570a836 md5: 26981599908ed2205366e8fc91b37fc6 @@ -6574,6 +8843,19 @@ packages: purls: [] size: 2482475 timestamp: 1771378241063 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libgfortran5-15.2.0-h68bc16d_19.conda + sha256: 057978bb69fea29ed715a9b98adf71015c31baecc4aeb2bfc20d4fd5d83579d4 + md5: 85072b0ad177c966294f129b7c04a2d5 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=15.2.0 + constrains: + - libgfortran 15.2.0 + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + purls: [] + size: 2483673 + timestamp: 1778269025089 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libgfortran5-15.2.0-hdae7583_18.conda sha256: 91033978ba25e6a60fb86843cf7e1f7dc8ad513f9689f991c9ddabfaf0361e7e md5: c4a6f7989cffb0544bfd9207b6789971 @@ -6596,6 +8878,16 @@ packages: purls: [] size: 603262 timestamp: 1771378117851 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libgomp-15.2.0-he0feb66_19.conda + sha256: 5abe4ab9d93f6c9757d654f1969ae2267d4505315c1f2f8fe705fd60af084f1b + md5: faac990cb7aedc7f3a2224f2c9b0c26c + depends: + - __glibc >=2.17,<3.0.a0 + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + purls: [] + size: 603817 + timestamp: 1778268942614 - conda: https://conda.anaconda.org/conda-forge/win-64/libgomp-15.2.0-h8ee18e1_18.conda sha256: 94981bc2e42374c737750895c6fdcfc43b7126c4fc788cad0ecc7281745931da md5: 939fb173e2a4d4e980ef689e99b35223 @@ -6666,8 +8958,20 @@ packages: - jpeg <0.0.0a license: IJG AND BSD-3-Clause AND Zlib purls: [] - size: 633710 - timestamp: 1762094827865 + size: 633710 + timestamp: 1762094827865 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libjpeg-turbo-3.1.4.1-hb03c661_0.conda + sha256: 10056646c28115b174de81a44e23e3a0a3b95b5347d2e6c45cc6d49d35294256 + md5: 
6178c6f2fb254558238ef4e6c56fb782 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + constrains: + - jpeg <0.0.0a + license: IJG AND BSD-3-Clause AND Zlib + purls: [] + size: 633831 + timestamp: 1775962768273 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libjpeg-turbo-3.1.2-hc919400_0.conda sha256: 6c061c56058bb10374daaef50e81b39cf43e8aee21f0037022c0c39c4f31872f md5: f0695fbecf1006f27f4395d64bd0c4b8 @@ -6707,6 +9011,21 @@ packages: purls: [] size: 18200 timestamp: 1765818857876 +- conda: https://conda.anaconda.org/conda-forge/linux-64/liblapack-3.11.0-6_h47877c9_openblas.conda + build_number: 6 + sha256: 371f517eb7010b21c6cc882c7606daccebb943307cb9a3bf2c70456a5c024f7d + md5: 881d801569b201c2e753f03c84b85e15 + depends: + - libblas 3.11.0 6_h4a7cf45_openblas + constrains: + - blas 2.306 openblas + - liblapacke 3.11.0 6*_openblas + - libcblas 3.11.0 6*_openblas + license: BSD-3-Clause + license_family: BSD + purls: [] + size: 18624 + timestamp: 1774503065378 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/liblapack-3.11.0-5_hd9741b5_openblas.conda build_number: 5 sha256: 735a6e6f7d7da6f718b6690b7c0a8ae4815afb89138aa5793abe78128e951dbb @@ -6749,6 +9068,18 @@ packages: purls: [] size: 113207 timestamp: 1768752626120 +- conda: https://conda.anaconda.org/conda-forge/linux-64/liblzma-5.8.3-hb03c661_0.conda + sha256: ec30e52a3c1bf7d0425380a189d209a52baa03f22fb66dd3eb587acaa765bd6d + md5: b88d90cad08e6bc8ad540cb310a761fb + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + constrains: + - xz 5.8.3.* + license: 0BSD + purls: [] + size: 113478 + timestamp: 1775825492909 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/liblzma-5.8.2-h8088a28_0.conda sha256: 7bfc7ffb2d6a9629357a70d4eadeadb6f88fa26ebc28f606b1c1e5e5ed99dc7e md5: 009f0d956d7bfb00de86901d16e486c7 @@ -6835,6 +9166,39 @@ packages: purls: [] size: 666600 timestamp: 1756834976695 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libnghttp2-1.68.1-h877daf1_0.conda + sha256: 663444d77a42f2265f54fb8b48c5450bfff4388d9c0f8253dd7855f0d993153f + md5: 2a45e7f8af083626f009645a6481f12d + depends: + - __glibc >=2.17,<3.0.a0 + - c-ares >=1.34.6,<2.0a0 + - libev >=4.33,<4.34.0a0 + - libev >=4.33,<5.0a0 + - libgcc >=14 + - libstdcxx >=14 + - libzlib >=1.3.1,<2.0a0 + - openssl >=3.5.5,<4.0a0 + license: MIT + license_family: MIT + purls: [] + size: 663344 + timestamp: 1773854035739 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/libnghttp2-1.68.1-h8f3e76b_0.conda + sha256: 2bc7bc3978066f2c274ebcbf711850cc9ab92e023e433b9631958a098d11e10a + md5: 6ea18834adbc3b33df9bd9fb45eaf95b + depends: + - __osx >=11.0 + - c-ares >=1.34.6,<2.0a0 + - libcxx >=19 + - libev >=4.33,<4.34.0a0 + - libev >=4.33,<5.0a0 + - libzlib >=1.3.1,<2.0a0 + - openssl >=3.5.5,<4.0a0 + license: MIT + license_family: MIT + purls: [] + size: 576526 + timestamp: 1773854624224 - conda: https://conda.anaconda.org/conda-forge/linux-64/libnvptxcompiler-dev-12.9.86-ha770c72_2.conda sha256: 1e7a7b34f8639a5feb75ba864127059e4d83edfe1a516547f0dbb9941e7b8f8b md5: 3fd926c321c6dbf386aa14bd8b125bfb @@ -6845,6 +9209,16 @@ packages: purls: [] size: 27046 timestamp: 1753975516342 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libnvptxcompiler-dev-13.2.78-ha770c72_0.conda + sha256: 1ee47ea506cfacd6c06fd09afb229c68d8925c5342a40fa40d54682ae6216021 + md5: 009ab9d572c1fe55cc952600acfcacf8 + depends: + - cuda-version >=13.2,<13.3.0a0 + - libnvptxcompiler-dev_linux-64 13.2.78 ha770c72_0 + license: LicenseRef-NVIDIA-End-User-License-Agreement + purls: [] 
+ size: 28437 + timestamp: 1776121449699 - conda: https://conda.anaconda.org/conda-forge/noarch/libnvptxcompiler-dev_linux-64-12.9.86-ha770c72_2.conda sha256: 17952c32eac197a59c119fdf3fb6f08c6a29c225a80bae141ac904ad212b87dd md5: a66a909acf08924aced622903832a937 @@ -6854,6 +9228,15 @@ packages: purls: [] size: 14422867 timestamp: 1753975387297 +- conda: https://conda.anaconda.org/conda-forge/noarch/libnvptxcompiler-dev_linux-64-13.2.78-ha770c72_0.conda + sha256: 3d12a8f80dd25b889302cd091bdbb75135938c1365496a5d7be504fe2f347cf7 + md5: 8727a04a5bc3d451d45c907d03cda88f + depends: + - cuda-version >=13.2,<13.3.0a0 + license: LicenseRef-NVIDIA-End-User-License-Agreement + purls: [] + size: 15164138 + timestamp: 1776121337288 - conda: https://conda.anaconda.org/conda-forge/linux-64/libopenblas-0.3.30-pthreads_h94d23a6_4.conda sha256: 199d79c237afb0d4780ccd2fbf829cea80743df60df4705202558675e07dd2c5 md5: be43915efc66345cccb3c310b6ed0374 @@ -6869,6 +9252,21 @@ packages: purls: [] size: 5927939 timestamp: 1763114673331 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libopenblas-0.3.32-pthreads_h94d23a6_0.conda + sha256: 6dc30b28f32737a1c52dada10c8f3a41bc9e021854215efca04a7f00487d09d9 + md5: 89d61bc91d3f39fda0ca10fcd3c68594 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + - libgfortran + - libgfortran5 >=14.3.0 + constrains: + - openblas >=0.3.32,<0.3.33.0a0 + license: BSD-3-Clause + license_family: BSD + purls: [] + size: 5928890 + timestamp: 1774471724897 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libopenblas-0.3.30-openmp_ha158390_4.conda sha256: ebbbc089b70bcde87c4121a083c724330f02a690fb9d7c6cd18c30f1b12504fa md5: a6f6d3a31bb29e48d37ce65de54e2df0 @@ -6895,6 +9293,17 @@ packages: purls: [] size: 317669 timestamp: 1770691470744 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libpng-1.6.58-h421ea60_0.conda + sha256: 377cfe037f3eeb3b1bf3ad333f724a64d32f315ee1958581fc671891d63d3f89 + md5: eba48a68a1a2b9d3c0d9511548db85db + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + - libzlib >=1.3.2,<2.0a0 + license: zlib-acknowledgement + purls: [] + size: 317729 + timestamp: 1776315175087 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libpng-1.6.55-h132b30e_0.conda sha256: 7a4fd29a6ee2d7f7a6e610754dfdf7410ed08f40d8d8b488a27bc0f9981d5abb md5: 871dc88b0192ac49b6a5509932c31377 @@ -6929,6 +9338,18 @@ packages: purls: [] size: 7949259 timestamp: 1771377982207 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libsanitizer-15.2.0-h90f66d4_19.conda + sha256: 7a58892a52739ce4c0f7109de9e91b4353104748eb04fc6441d88e8af444ba99 + md5: 67eef12ce33f7ff99900c212d7076fc2 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=15.2.0 + - libstdcxx >=15.2.0 + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + purls: [] + size: 7930689 + timestamp: 1778269054623 - conda: https://conda.anaconda.org/conda-forge/linux-64/libsodium-1.0.21-h280c20c_3.conda sha256: 64e5c80cbce4680a2d25179949739a6def695d72c40ca28f010711764e372d97 md5: 7af961ef4aa2c1136e11dd43ded245ab @@ -6971,6 +9392,17 @@ packages: purls: [] size: 951405 timestamp: 1772818874251 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.53.1-h0c1763c_0.conda + sha256: 54cdcd3214313b62c2a8ee277e6f42150d9b748264c1b70d958bf735e420ef8d + md5: 7dc38adcbf71e6b38748e919e16e0dce + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + - libzlib >=1.3.2,<2.0a0 + license: blessing + purls: [] + size: 954962 + timestamp: 1777986471789 - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.52.0-h1ae2325_0.conda sha256: beb0fd5594d6d7c7cd42c992b6bb4d66cbb39d6c94a8234f15956da99a04306c md5: f6233a3fddc35a2ec9f617f79d6f3d71 @@ -7006,6 +9438,31 @@ packages: purls: [] size: 304790 timestamp: 1745608545575 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/libssh2-1.11.1-h1590b86_0.conda + sha256: 8bfe837221390ffc6f111ecca24fa12d4a6325da0c8d131333d63d6c37f27e0a + md5: b68e8f66b94b44aaa8de4583d3d4cc40 + depends: + - libzlib >=1.3.1,<2.0a0 + - openssl >=3.5.0,<4.0a0 + license: BSD-3-Clause + license_family: BSD + purls: [] + size: 279193 + timestamp: 1745608793272 +- conda: https://conda.anaconda.org/conda-forge/win-64/libssh2-1.11.1-h9aa295b_0.conda + sha256: cbdf93898f2e27cefca5f3fe46519335d1fab25c4ea2a11b11502ff63e602c09 + md5: 9dce2f112bfd3400f4f432b3d0ac07b2 + depends: + - libzlib >=1.3.1,<2.0a0 + - openssl >=3.5.0,<4.0a0 + - ucrt >=10.0.20348.0 + - vc >=14.2,<15 + - vc14_runtime >=14.29.30139 + license: BSD-3-Clause + license_family: BSD + purls: [] + size: 292785 + timestamp: 1745608759342 - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-15.2.0-h934c35e_18.conda sha256: 78668020064fdaa27e9ab65cd2997e2c837b564ab26ce3bf0e58a2ce1a525c6e md5: 1b08cd684f34175e4514474793d44bcb @@ -7019,6 +9476,19 @@ packages: purls: [] size: 5852330 timestamp: 1771378262446 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-15.2.0-h934c35e_19.conda + sha256: dff1058c76ec6b8759e41cefa2508162d00e4a5e6721aa68ec3fd10094e702dc + md5: 5794b3bdc38177caf969dabd3af08549 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc 15.2.0 he0feb66_19 + constrains: + - libstdcxx-ng ==15.2.0=*_19 + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + purls: [] + size: 5852044 + timestamp: 1778269036376 - conda: https://conda.anaconda.org/conda-forge/noarch/libstdcxx-devel_linux-64-14.3.0-h9f08a49_118.conda sha256: b1c3824769b92a1486bf3e2cc5f13304d83ae613ea061b7bc47bb6080d6dfdba md5: 865a399bce236119301ebd1532fced8d @@ -7029,6 +9499,16 @@ packages: purls: [] size: 20171098 timestamp: 1771377827750 +- conda: https://conda.anaconda.org/conda-forge/noarch/libstdcxx-devel_linux-64-15.2.0-hd446a21_119.conda + sha256: a2385f3611d5cd25378f9cf2367183320731709c067ddd08d43330d3170f15b8 + md5: bcfe7eae40158c3e355d2f9d3ed41230 + depends: + - __unix + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + purls: [] + size: 20765069 + timestamp: 1778268963689 - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-15.2.0-hdf11a46_18.conda sha256: 3c902ffd673cb3c6ddde624cdb80f870b6c835f8bf28384b0016e7d444dd0145 md5: 6235adb93d064ecdf3d44faee6f468de @@ -7039,6 +9519,16 @@ packages: purls: [] size: 27575 timestamp: 1771378314494 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-15.2.0-hdf11a46_19.conda + sha256: 0672b6b6e1791c92e8eccad58081a99d614fcf82bca5841f9dfa3c3e658f83b9 + md5: e5ce228e579726c07255dbf90dc62101 + depends: + - libstdcxx 15.2.0 h934c35e_19 + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + purls: [] + size: 27776 + timestamp: 1778269074600 - conda: https://conda.anaconda.org/conda-forge/linux-64/libtasn1-4.21.0-hb03c661_0.conda sha256: a3f0c33ef567eb2e3a22d7fea0717a294a5fea4964478aa06b467ce1c93bec38 md5: 0ffe6217a3d09398155d32a2ddb41251 @@ -7133,6 +9623,17 @@ packages: purls: [] size: 40311 timestamp: 1766271528534 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.42-h5347b49_0.conda + sha256: 
bc1b08c92626c91500fd9f26f2c797f3eb153b627d53e9c13cd167f1e12b2829 + md5: 38ffe67b78c9d4de527be8315e5ada2c + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + license: BSD-3-Clause + license_family: BSD + purls: [] + size: 40297 + timestamp: 1775052476770 - conda: https://conda.anaconda.org/conda-forge/linux-64/libwebp-base-1.6.0-hd42ef1d_0.conda sha256: 3aed21ab28eddffdaf7f804f49be7a7d701e8f0e46c856d801270b470820a37b md5: aea31d2e5b1091feca96fcfe945c3cf9 @@ -7242,6 +9743,22 @@ packages: purls: [] size: 45968 timestamp: 1772704614539 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libxml2-2.15.3-h49c6c72_0.conda + sha256: 3bc5551720c58591f6ea1146f7d1539c734ed1c40e7b9f5cb8cb7e900c509aba + md5: 995d8c8bad2a3cc8db14675a153dec2b + depends: + - __glibc >=2.17,<3.0.a0 + - icu >=78.3,<79.0a0 + - libgcc >=14 + - libiconv >=1.18,<2.0a0 + - liblzma >=5.8.3,<6.0a0 + - libxml2-16 2.15.3 hca6bf5a_0 + - libzlib >=1.3.2,<2.0a0 + license: MIT + license_family: MIT + purls: [] + size: 46810 + timestamp: 1776376751152 - conda: https://conda.anaconda.org/conda-forge/win-64/libxml2-2.15.2-h5d26750_0.conda sha256: f905eb7046987c336122121759e7f09144729f6898f48cd06df2a945b86998d8 md5: 1007e1bfe181a2aee214779ee7f13d30 @@ -7277,6 +9794,23 @@ packages: purls: [] size: 557492 timestamp: 1772704601644 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libxml2-16-2.15.3-hca6bf5a_0.conda + sha256: 3d44f737c5ae52d5af32682cc1530df433f401f8e58a7533926536244127572a + md5: e79d2c2f24b027aa8d5ab1b1ba3061e7 + depends: + - __glibc >=2.17,<3.0.a0 + - icu >=78.3,<79.0a0 + - libgcc >=14 + - libiconv >=1.18,<2.0a0 + - liblzma >=5.8.3,<6.0a0 + - libzlib >=1.3.2,<2.0a0 + constrains: + - libxml2 2.15.3 + license: MIT + license_family: MIT + purls: [] + size: 559775 + timestamp: 1776376739004 - conda: https://conda.anaconda.org/conda-forge/win-64/libxml2-16-2.15.2-h692994f_0.conda sha256: b8c71b3b609c7cfe17f3f2a47c75394d7b30acfb8b34ad7a049ea8757b4d33df md5: e365238134188e42ed36ee996159d482 @@ -7295,45 +9829,44 @@ packages: purls: [] size: 520078 timestamp: 1772704728534 -- conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.1-hb9d3cd8_2.conda - sha256: d4bfe88d7cb447768e31650f06257995601f89076080e76df55e3112d4e47dc4 - md5: edb0dca6bc32e4f4789199455a1dbeb8 +- conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.2-h25fd6f3_2.conda + sha256: 55044c403570f0dc26e6364de4dc5368e5f3fc7ff103e867c487e2b5ab2bcda9 + md5: d87ff7921124eccd67248aa483c23fec depends: - __glibc >=2.17,<3.0.a0 - - libgcc >=13 constrains: - - zlib 1.3.1 *_2 + - zlib 1.3.2 *_2 license: Zlib license_family: Other purls: [] - size: 60963 - timestamp: 1727963148474 -- conda: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.3.1-h8359307_2.conda - sha256: ce34669eadaba351cd54910743e6a2261b67009624dbc7daeeafdef93616711b - md5: 369964e85dc26bfe78f41399b366c435 + size: 63629 + timestamp: 1774072609062 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.3.2-h8088a28_2.conda + sha256: 361415a698514b19a852f5d1123c5da746d4642139904156ddfca7c922d23a05 + md5: bc5a5721b6439f2f62a84f2548136082 depends: - __osx >=11.0 constrains: - - zlib 1.3.1 *_2 + - zlib 1.3.2 *_2 license: Zlib license_family: Other purls: [] - size: 46438 - timestamp: 1727963202283 -- conda: https://conda.anaconda.org/conda-forge/win-64/libzlib-1.3.1-h2466b09_2.conda - sha256: ba945c6493449bed0e6e29883c4943817f7c79cbff52b83360f7b341277c6402 - md5: 41fbfac52c601159df6c01f875de31b9 + size: 47759 + timestamp: 1774072956767 +- conda: 
https://conda.anaconda.org/conda-forge/win-64/libzlib-1.3.2-hfd05255_2.conda + sha256: 88609816e0cc7452bac637aaf65783e5edf4fee8a9f8e22bdc3a75882c536061 + md5: dbabbd6234dea34040e631f87676292f depends: - ucrt >=10.0.20348.0 - - vc >=14.2,<15 - - vc14_runtime >=14.29.30139 + - vc >=14.3,<15 + - vc14_runtime >=14.44.35208 constrains: - - zlib 1.3.1 *_2 + - zlib 1.3.2 *_2 license: Zlib license_family: Other purls: [] - size: 55476 - timestamp: 1727963768015 + size: 58347 + timestamp: 1774072851498 - conda: https://conda.anaconda.org/conda-forge/noarch/linkify-it-py-2.1.0-pyhcf101f3_0.conda sha256: 991a82fbb64aba6d10719a017ce354e28df02ea5df1d9c7b0221da573c168d27 md5: 1005e1f39083adad2384772e8e384e43 @@ -7468,6 +10001,18 @@ packages: - pkg:pypi/markdown-it-py?source=hash-mapping size: 64736 timestamp: 1754951288511 +- conda: https://conda.anaconda.org/conda-forge/noarch/markdown-it-py-4.2.0-pyhd8ed1ab_0.conda + sha256: 0c4c35376fe920714390d46e4b8d31c876d65f18e1655899e0763ec25f2a902f + md5: 6d03368f2b2b0a5fb6839df53b2eb5e0 + depends: + - mdurl >=0.1,<1 + - python >=3.10 + license: MIT + license_family: MIT + purls: + - pkg:pypi/markdown-it-py?source=hash-mapping + size: 69017 + timestamp: 1778169663339 - conda: https://conda.anaconda.org/conda-forge/linux-64/markupsafe-3.0.3-py314h67df5f8_1.conda sha256: c279be85b59a62d5c52f5dd9a4cd43ebd08933809a8416c22c3131595607d4cf md5: 9a17c4307d23318476d7fbf0fedc0cde @@ -7547,6 +10092,36 @@ packages: - pkg:pypi/matplotlib?source=hash-mapping size: 8473358 timestamp: 1763055439346 +- conda: https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.10.9-py314h1194b4b_0.conda + sha256: 94599b0ca937530f7c7ba1e394cbe8420db613da2524bd0000988e9bbe118f0a + md5: 11a821746ad11e642fcc615c3d66aa44 + depends: + - __glibc >=2.17,<3.0.a0 + - contourpy >=1.0.1 + - cycler >=0.10 + - fonttools >=4.22.0 + - freetype + - kiwisolver >=1.3.1 + - libfreetype >=2.14.3 + - libfreetype6 >=2.14.3 + - libgcc >=14 + - libstdcxx >=14 + - numpy >=1.23 + - numpy >=1.23,<3 + - packaging >=20.0 + - pillow >=8 + - pyparsing >=2.3.1 + - python >=3.14,<3.15.0a0 + - python-dateutil >=2.7 + - python_abi 3.14.* *_cp314 + - qhull >=2020.2,<2020.3.0a0 + - tk >=8.6.13,<8.7.0a0 + license: PSF-2.0 + license_family: PSF + purls: + - pkg:pypi/matplotlib?source=hash-mapping + size: 8545652 + timestamp: 1777000575998 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/matplotlib-base-3.10.8-py314hd63e3f0_0.conda sha256: 198dcc0ed83e78bc7bf48e6ef8d4ecd220e9cf1f07db98508251b2bc0be067f9 md5: c84152e510d41378b8758826655b6ed7 @@ -7617,6 +10192,18 @@ packages: - pkg:pypi/matplotlib-inline?source=hash-mapping size: 15175 timestamp: 1761214578417 +- conda: https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.2.2-pyhd8ed1ab_0.conda + sha256: 35b43d7343f74452307fd018a1cca92b8f68961ff8e2ab6a81ce0a703c9a3764 + md5: 9acc1c385be401d533ff70ef5b50dae6 + depends: + - python >=3.10 + - traitlets + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/matplotlib-inline?source=compressed-mapping + size: 15725 + timestamp: 1778264403247 - conda: https://conda.anaconda.org/conda-forge/noarch/mdit-py-plugins-0.5.0-pyhd8ed1ab_0.conda sha256: 123cc004e2946879708cdb6a9eff24acbbb054990d6131bb94bca7a374ebebfc md5: 1997a083ef0b4c9331f9191564be275e @@ -7629,6 +10216,18 @@ packages: - pkg:pypi/mdit-py-plugins?source=hash-mapping size: 43805 timestamp: 1754946862113 +- conda: https://conda.anaconda.org/conda-forge/noarch/mdit-py-plugins-0.6.0-pyhd8ed1ab_0.conda + sha256: 
443e7f8ae88f71b3e7fd9c3d19d3816fb1965e2352d5e01a6bfdf2eccfcf4795 + md5: 9a704e945e87078f464726c69071677a + depends: + - markdown-it-py >=2.0.0,<5.0.0 + - python >=3.10 + license: MIT + license_family: MIT + purls: + - pkg:pypi/mdit-py-plugins?source=hash-mapping + size: 50607 + timestamp: 1778171019802 - pypi: https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl name: mdurl version: 0.1.2 @@ -7665,6 +10264,26 @@ packages: - pkg:pypi/memray?source=hash-mapping size: 1845157 timestamp: 1773493681427 +- conda: https://conda.anaconda.org/conda-forge/linux-64/memray-1.19.3-py314hef15ded_0.conda + sha256: 729c193d71291cfc24db8161199ed9f3508579f1b2b7eb250f25cbf903dd58d9 + md5: b2ace6799650355733a5eab297301940 + depends: + - python + - rich >=11.2.0 + - jinja2 + - textual >=0.34.0 + - libgcc >=14 + - __glibc >=2.17,<3.0.a0 + - libstdcxx >=14 + - elfutils >=0.194,<0.195.0a0 + - libunwind >=1.8.3,<1.9.0a0 + - python_abi 3.14.* *_cp314 + - lz4-c >=1.10.0,<1.11.0a0 + license: Apache-2.0 AND BSD-3-Clause + purls: + - pkg:pypi/memray?source=hash-mapping + size: 1849063 + timestamp: 1775683239172 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/memray-1.19.2-py314h44d60dd_0.conda sha256: 5e053a64dbcdc98bd470f358f3fce1cde9b9fe362280a87cb66f1587e9f09e26 md5: f29f08f053a687bcfe09089f8a410bc9 @@ -7696,6 +10315,19 @@ packages: - pkg:pypi/mistune?source=hash-mapping size: 74250 timestamp: 1766504456031 +- conda: https://conda.anaconda.org/conda-forge/noarch/mistune-3.2.1-pyhcf101f3_0.conda + sha256: b52dc6c78fbbe7a3008535cb8bfd87d70d8053e9250bbe16e387470a9df07070 + md5: b97e84d1553b4a1c765b87fff83453ad + depends: + - python >=3.10 + - typing_extensions + - python + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/mistune?source=hash-mapping + size: 74567 + timestamp: 1777824616382 - conda: https://conda.anaconda.org/conda-forge/win-64/mkl-2025.3.0-hac47afa_455.conda sha256: b2b4c84b95210760e4d12319416c60ab66e03674ccdcbd14aeb59f82ebb1318d md5: fd05d1e894497b012d05a804232254ed @@ -7758,6 +10390,15 @@ packages: - pylint>=2.6.0 ; extra == 'dev' - pyink ; extra == 'dev' requires_python: '>=3.9' +- pypi: https://files.pythonhosted.org/packages/25/1f/cca084ca2572810fff12ea9dbdcbe39eac048f40daf4a9077b49fcbe8cee/msgspec-0.21.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + name: msgspec + version: 0.21.1 + sha256: 3d6b9dc50948eaf65df54d2fd0ff66e6d8c32f116037209ee861810eb9b676cb + requires_dist: + - tomli ; python_full_version < '3.11' and extra == 'toml' + - tomli-w ; extra == 'toml' + - pyyaml ; extra == 'yaml' + requires_python: '>=3.10' - conda: https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyhd8ed1ab_1.conda sha256: d09c47c2cf456de5c09fa66d2c3c5035aa1fa228a1983a433c47b876aa16ce90 md5: 37293a85a0f4f77bbd9cf7aaefc62609 @@ -7791,6 +10432,28 @@ packages: - sqlparse ; extra == 'sql' - sqlframe>=3.22.0,!=3.39.3 ; extra == 'sqlframe' requires_python: '>=3.9' +- pypi: https://files.pythonhosted.org/packages/c7/e1/68c2256b69a314eba133673377ba9118c356f6342a0c02b61de449cf2bf2/narwhals-2.21.0-py3-none-any.whl + name: narwhals + version: 2.21.0 + sha256: 1e6617d0fca68ae1fda29e5397c4eaacd3ffc9fffe6bcd6ded0c690475e853be + requires_dist: + - cudf-cu12>=24.10.0 ; extra == 'cudf' + - dask[dataframe]>=2024.8 ; extra == 'dask' + - duckdb>=1.1 ; extra == 'duckdb' + - ibis-framework>=6.0.0 ; extra == 'ibis' + - packaging ; extra == 'ibis' + - pyarrow-hotfix ; extra == 'ibis' + - 
rich ; extra == 'ibis' + - modin ; extra == 'modin' + - pandas>=1.1.3 ; extra == 'pandas' + - polars>=0.20.4 ; extra == 'polars' + - pyarrow>=13.0.0 ; extra == 'pyarrow' + - pyspark>=3.5.0 ; extra == 'pyspark' + - pyspark[connect]>=3.5.0 ; extra == 'pyspark-connect' + - duckdb>=1.1 ; extra == 'sql' + - sqlparse ; extra == 'sql' + - sqlframe>=3.22.0,!=3.39.3 ; extra == 'sqlframe' + requires_python: '>=3.9' - conda: https://conda.anaconda.org/conda-forge/noarch/nbclient-0.10.4-pyhd8ed1ab_0.conda sha256: 1b66960ee06874ddceeebe375d5f17fb5f393d025a09e15b830ad0c4fffb585b md5: 00f5b8dafa842e0c27c1cd7296aa4875 @@ -7836,6 +10499,36 @@ packages: - pkg:pypi/nbconvert?source=compressed-mapping size: 202284 timestamp: 1769709543555 +- conda: https://conda.anaconda.org/conda-forge/noarch/nbconvert-core-7.17.1-pyhcf101f3_0.conda + sha256: ab2ac79c5892c5434d50b3542d96645bdaa06d025b6e03734be29200de248ac2 + md5: 2bce0d047658a91b99441390b9b27045 + depends: + - beautifulsoup4 + - bleach-with-css !=5.0.0 + - defusedxml + - importlib-metadata >=3.6 + - jinja2 >=3.0 + - jupyter_core >=4.7 + - jupyterlab_pygments + - markupsafe >=2.0 + - mistune >=2.0.3,<4 + - nbclient >=0.5.0 + - nbformat >=5.7 + - packaging + - pandocfilters >=1.4.1 + - pygments >=2.4.1 + - python >=3.10 + - traitlets >=5.1 + - python + constrains: + - pandoc >=2.9.2,<4.0.0 + - nbconvert ==7.17.1 *_0 + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/nbconvert?source=hash-mapping + size: 202229 + timestamp: 1775615493260 - conda: https://conda.anaconda.org/conda-forge/noarch/nbformat-5.10.4-pyhd8ed1ab_1.conda sha256: 7a5bd30a2e7ddd7b85031a5e2e14f290898098dc85bea5b3a5bf147c25122838 md5: bbe1963f1e47f594070ffe87cdf612ea @@ -7861,6 +10554,16 @@ packages: purls: [] size: 891641 timestamp: 1738195959188 +- conda: https://conda.anaconda.org/conda-forge/linux-64/ncurses-6.6-hdb14827_0.conda + sha256: fc89f74bbe362fb29fa3c037697a89bec140b346a2469a90f7936d1d7ea4d8a3 + md5: fc21868a1a5aacc937e7a18747acb8a5 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + license: X11 AND BSD-3-Clause + purls: [] + size: 918956 + timestamp: 1777422145199 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ncurses-6.5-h5e97a16_3.conda sha256: 2827ada40e8d9ca69a153a45f7fd14f32b2ead7045d3bbb5d10964898fe65733 md5: 068d497125e4bf8a66bf707254fff5ae @@ -7946,6 +10649,26 @@ packages: - pkg:pypi/numpy?source=hash-mapping size: 8926994 timestamp: 1770098474394 +- conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-2.4.3-py314h2b28147_0.conda + sha256: f2ba8cb0d86a6461a6bcf0d315c80c7076083f72c6733c9290086640723f79ec + md5: 36f5b7eb328bdc204954a2225cf908e2 + depends: + - python + - libstdcxx >=14 + - libgcc >=14 + - __glibc >=2.17,<3.0.a0 + - python_abi 3.14.* *_cp314 + - libcblas >=3.9.0,<4.0a0 + - liblapack >=3.9.0,<4.0a0 + - libblas >=3.9.0,<4.0a0 + constrains: + - numpy-base <0a0 + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/numpy?source=hash-mapping + size: 8927860 + timestamp: 1773839233468 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-2.4.2-py314hae46ccb_1.conda sha256: 43b5ed0ead36e5133ee8462916d23284f0bce0e5f266fa4bd31a020a6cc22f14 md5: 0f0ddf0575b98d91cda9e3ca9eaeb9a2 @@ -7986,31 +10709,72 @@ packages: - pkg:pypi/numpy?source=hash-mapping size: 7309134 timestamp: 1770098414535 +- pypi: https://files.pythonhosted.org/packages/f8/79/0cefdaa1d9e45018a227bac64a79b92d2733cde28a8fd09c65362de08622/nvidia_cublas-13.4.1.1-py3-none-manylinux_2_27_x86_64.whl + name: nvidia-cublas + version: 13.4.1.1 + 
sha256: 28c983c8c03aa9a2d7b36cddcef2bfeeea85e13241d77df7622665502159f347 + requires_dist: + - nvidia-cuda-nvrtc + requires_python: '>=3' - pypi: https://files.pythonhosted.org/packages/77/3c/aa88abe01f3be3d1f8f787d1d33dc83e76fec05945f9a28fbb41cfb99cd5/nvidia_cublas_cu12-12.9.1.4-py3-none-manylinux_2_27_x86_64.whl name: nvidia-cublas-cu12 version: 12.9.1.4 sha256: 453611eb21a7c1f2c2156ed9f3a45b691deda0440ec550860290dc901af5b4c2 requires_python: '>=3' +- pypi: https://files.pythonhosted.org/packages/92/87/d23db8276b76b4a7e4a702eebdc0a70e3b56c17b4dcd980ecb0f68b022e1/nvidia_cuda_cccl-13.2.75-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + name: nvidia-cuda-cccl + version: 13.2.75 + sha256: 11a2b1948e8709805a0ccf04441baf5279a9219c13eb11dc13d57bb023151768 + requires_python: '>=3' - pypi: https://files.pythonhosted.org/packages/18/2a/d4cd8506d2044e082f8cd921be57392e6a9b5ccd3ffdf050362430a3d5d5/nvidia_cuda_cccl_cu12-12.9.27-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl name: nvidia-cuda-cccl-cu12 version: 12.9.27 sha256: 37869e17ce2e1ecec6eddf1927cca0f8c34e64fd848d40453df559091e2d7117 requires_python: '>=3' +- pypi: https://files.pythonhosted.org/packages/ea/78/501eee5cce9202fba2f3476529e296a7f6d003261d80b52ab0abfa09ddd6/nvidia_cuda_crt-13.2.78-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + name: nvidia-cuda-crt + version: 13.2.78 + sha256: 2c8615ee30ed466cb6298ecb8ffe9e6ea8b252ca833206152d155750bf831608 + requires_python: '>=3' +- pypi: https://files.pythonhosted.org/packages/b7/2d/cbf8f6288259c502165282fdaa2b733daae98434e3f2aee2b7952ba87c6f/nvidia_cuda_cupti-13.2.75-py3-none-manylinux_2_25_x86_64.whl + name: nvidia-cuda-cupti + version: 13.2.75 + sha256: f75aca6bef89c625a4076a820302bb06764daa1d21595286f6bee5e237d3a187 + requires_python: '>=3' - pypi: https://files.pythonhosted.org/packages/c1/2e/b84e32197e33f39907b455b83395a017e697c07a449a2b15fd07fc1c9981/nvidia_cuda_cupti_cu12-12.9.79-py3-none-manylinux_2_25_x86_64.whl name: nvidia-cuda-cupti-cu12 version: 12.9.79 sha256: 096bcf334f13e1984ba36685ad4c1d6347db214de03dbb6eebb237b41d9d934f requires_python: '>=3' +- pypi: https://files.pythonhosted.org/packages/65/0f/c7c7d538c61794130e759ad74710ab5aa8cab1f700ee1754381f8c665605/nvidia_cuda_nvcc-13.2.78-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + name: nvidia-cuda-nvcc + version: 13.2.78 + sha256: c3bd144dd9b6b25e062589acb7bbd43d93d3120c72fad71da808f9817aba1239 + requires_dist: + - nvidia-nvvm + - nvidia-cuda-runtime + - nvidia-cuda-crt + requires_python: '>=3' - pypi: https://files.pythonhosted.org/packages/25/48/b54a06168a2190572a312bfe4ce443687773eb61367ced31e064953dd2f7/nvidia_cuda_nvcc_cu12-12.9.86-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl name: nvidia-cuda-nvcc-cu12 version: 12.9.86 sha256: 5d6a0d32fdc7ea39917c20065614ae93add6f577d840233237ff08e9a38f58f0 requires_python: '>=3' +- pypi: https://files.pythonhosted.org/packages/5f/96/237b40b171e06eb65905375c4ad5c96f78c2f861ac6e8ae7f650d95e1dfd/nvidia_cuda_nvrtc-13.2.78-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl + name: nvidia-cuda-nvrtc + version: 13.2.78 + sha256: a9049031da08cbedd0c20e3470e5a978dc330af0e0326b3b05774718c665dc3e + requires_python: '>=3' - pypi: https://files.pythonhosted.org/packages/b8/85/e4af82cc9202023862090bfca4ea827d533329e925c758f0cde964cb54b7/nvidia_cuda_nvrtc_cu12-12.9.86-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl name: nvidia-cuda-nvrtc-cu12 version: 12.9.86 sha256: 210cf05005a447e29214e9ce50851e83fc5f4358df8b453155d5e1918094dcb4 
requires_python: '>=3' +- pypi: https://files.pythonhosted.org/packages/dc/74/f1493b0774c6eaf0234512bb650e1ab90ce8f61fecf0b4aaf1fb416f571e/nvidia_cuda_runtime-13.2.75-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + name: nvidia-cuda-runtime + version: 13.2.75 + sha256: 72bf454902da594e0b833cadeddc8b7100ce1c7cf7ed9023943931be1aa913b7 + requires_python: '>=3' - pypi: https://files.pythonhosted.org/packages/bc/46/a92db19b8309581092a3add7e6fceb4c301a3fd233969856a8cbf042cd3c/nvidia_cuda_runtime_cu12-12.9.79-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl name: nvidia-cuda-runtime-cu12 version: 12.9.79 @@ -8023,6 +10787,20 @@ packages: requires_dist: - nvidia-cublas-cu12 requires_python: '>=3' +- pypi: https://files.pythonhosted.org/packages/57/96/ce2cb84b5e8bb94dd55f554e3454b91e9ecd6708aa27d4a7b12f287613bc/nvidia_cudnn_cu13-9.22.0.52-py3-none-manylinux_2_27_x86_64.whl + name: nvidia-cudnn-cu13 + version: 9.22.0.52 + sha256: 7b24277af8cd2e4e5be731f5cf910255105d4b92481999771b99dbffee75d03e + requires_dist: + - nvidia-cublas + requires_python: '>=3' +- pypi: https://files.pythonhosted.org/packages/36/3e/8d717a6e1f6e27b85b64650b1104dbcf6108c9dc7e27e9e26a0d8e936cc5/nvidia_cufft-12.2.0.46-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + name: nvidia-cufft + version: 12.2.0.46 + sha256: a9667ae4d81b9e54ddbbad24a9e72334f89d4fc184566d05ef028e2760c820eb + requires_dist: + - nvidia-nvjitlink + requires_python: '>=3' - pypi: https://files.pythonhosted.org/packages/95/f4/61e6996dd20481ee834f57a8e9dca28b1869366a135e0d42e2aa8493bdd4/nvidia_cufft_cu12-11.4.1.4-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl name: nvidia-cufft-cu12 version: 11.4.1.4 @@ -8030,6 +10808,15 @@ packages: requires_dist: - nvidia-nvjitlink-cu12 requires_python: '>=3' +- pypi: https://files.pythonhosted.org/packages/6b/97/a3c41eac54c89f6aac788d2b3ccd6642b32aa6b79650af3dedb8ee7c2bfa/nvidia_cusolver-12.2.0.1-py3-none-manylinux_2_27_x86_64.whl + name: nvidia-cusolver + version: 12.2.0.1 + sha256: 4693ea3c2a5d20369da7b5a4970a41df9b40f1b6f2ef9909c95f7c8c8c5ffb4d + requires_dist: + - nvidia-cublas + - nvidia-nvjitlink + - nvidia-cusparse + requires_python: '>=3' - pypi: https://files.pythonhosted.org/packages/33/40/79b0c64d44d6c166c0964ec1d803d067f4a145cca23e23925fd351d0e642/nvidia_cusolver_cu12-11.7.5.82-py3-none-manylinux_2_27_x86_64.whl name: nvidia-cusolver-cu12 version: 11.7.5.82 @@ -8039,6 +10826,13 @@ packages: - nvidia-nvjitlink-cu12 - nvidia-cusparse-cu12 requires_python: '>=3' +- pypi: https://files.pythonhosted.org/packages/b7/bd/bad43b37bcf13167637bef26399693d517b95092d742e8749eda5f4a85f3/nvidia_cusparse-12.7.10.1-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + name: nvidia-cusparse + version: 12.7.10.1 + sha256: f0d110640aa63e7182fa787cc245afa07c5fb84ac30f1c4029e4fa3012353172 + requires_dist: + - nvidia-nvjitlink + requires_python: '>=3' - pypi: https://files.pythonhosted.org/packages/12/46/b0fd4b04f86577921feb97d8e2cf028afe04f614d17fb5013de9282c9216/nvidia_cusparse_cu12-12.5.10.65-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl name: nvidia-cusparse-cu12 version: 12.5.10.65 @@ -8051,6 +10845,16 @@ packages: version: 2.29.7 sha256: ecd0a012051abc20c1aa87328841efa8cade3ced65803046e38c2f03c0891fea requires_python: '>=3' +- pypi: https://files.pythonhosted.org/packages/3e/93/6d020a69fc37e57fae8a96ab0c53102d96538db256e933e914d100e5a430/nvidia_nccl_cu13-2.30.4-py3-none-manylinux_2_18_x86_64.whl + name: nvidia-nccl-cu13 + version: 2.30.4 + sha256: 
534dbf3058cadb625f08ab0d17f1dffad3b961a2bfa360d66633fcf21be53f57 + requires_python: '>=3' +- pypi: https://files.pythonhosted.org/packages/1e/b5/dae67f0c45516cfaff2d7fba873c7425c2866d4c9ede5c14a269d89ed79b/nvidia_nvjitlink-13.2.78-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl + name: nvidia-nvjitlink + version: 13.2.78 + sha256: 27964b6702aeceee05fc0ab47b4c97e3f8966bd47d05d9827e913c49a025656b + requires_python: '>=3' - pypi: https://files.pythonhosted.org/packages/46/0c/c75bbfb967457a0b7670b8ad267bfc4fffdf341c074e0a80db06c24ccfd4/nvidia_nvjitlink_cu12-12.9.86-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl name: nvidia-nvjitlink-cu12 version: 12.9.86 @@ -8063,6 +10867,18 @@ packages: requires_dist: - nvidia-cuda-cccl-cu12 requires_python: '>=3' +- pypi: https://files.pythonhosted.org/packages/5d/7b/2ab033584a3339552472ac8d79543c503a0e06dd0d082448b06697e7f716/nvidia_nvshmem_cu13-3.6.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl + name: nvidia-nvshmem-cu13 + version: 3.6.5 + sha256: 4001aabc72ead32ecc3c9add3c6781befcb71adcbe286d7f5956042e68668c70 + requires_dist: + - nvidia-cuda-cccl + requires_python: '>=3' +- pypi: https://files.pythonhosted.org/packages/e8/1f/930d63ccc8adcdf27bfc051a24e3e4da2cf6ef987848d6d1d642e29d704b/nvidia_nvvm-13.2.78-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl + name: nvidia-nvvm + version: 13.2.78 + sha256: f5aa433631109bbdec81802c5b5f319bf10bc891fe2f212e4e445845211d6f77 + requires_python: '>=3' - conda: https://conda.anaconda.org/conda-forge/linux-64/openjpeg-2.5.4-h55fea9a_0.conda sha256: 3900f9f2dbbf4129cf3ad6acf4e4b6f7101390b53843591c53b00f034343bc4d md5: 11b3379b191f63139e29c0d19dee24cd @@ -8119,6 +10935,18 @@ packages: purls: [] size: 3164551 timestamp: 1769555830639 +- conda: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.6.2-h35e630c_0.conda + sha256: c0ef482280e38c71a08ad6d71448194b719630345b0c9c60744a2010e8a8e0cb + md5: da1b85b6a87e141f5140bb9924cecab0 + depends: + - __glibc >=2.17,<3.0.a0 + - ca-certificates + - libgcc >=14 + license: Apache-2.0 + license_family: Apache + purls: [] + size: 3167099 + timestamp: 1775587756857 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openssl-3.6.1-hd24854e_1.conda sha256: 361f5c5e60052abc12bdd1b50d7a1a43e6a6653aab99a2263bf2288d709dcf67 md5: f4f6ad63f98f64191c3e77c5f5f29d76 @@ -8148,22 +10976,21 @@ packages: version: 3.4.0 sha256: 69bb92469f86a1565195ece4ac0323943e83477171b91d24c35afe028a90d7cd requires_python: '>=3.8' -- pypi: https://files.pythonhosted.org/packages/91/85/08c4e13a90a13c509d1fe09596dd8198338b6cfff9ee280f01ae7694889e/optimagic-0.5.3-py3-none-any.whl +- pypi: git+https://github.com/optimagic-dev/optimagic.git?branch=probability-allow-fixed-entries#ecd9ebe0dfb3abc09ab015d78774a30794d779d0 name: optimagic - version: 0.5.3 - sha256: 6723076dad2c186a7f7871e5676eeb579f340030c988136196246e0fe8995a68 + version: 0.5.4.dev9+gecd9ebe0d requires_dist: - - annotated-types - - cloudpickle - - joblib - - numpy - - pandas - - plotly + - annotated-types>=0.4 + - cloudpickle>=2.2 + - joblib>=1.1 + - numpy>=1.26 + - pandas>=2.1 + - plotly>=5.14 - pybaum>=0.1.2 - - scipy>=1.2.1 - - sqlalchemy>=1.3 - - typing-extensions - requires_python: '>=3.10' + - scipy>=1.11 + - sqlalchemy>=2.0 + - typing-extensions>=4.5 + requires_python: '>=3.12' - pypi: https://files.pythonhosted.org/packages/08/67/2e19866a03a6e75eb62194a5b55e1e3154ca1517478c300232b0229f8c2a/optree-0.19.0-cp314-cp314-macosx_11_0_arm64.whl name: optree version: 0.19.0 @@ -8284,6 +11111,48 @@ packages: - 
numpy ; extra == 'docs' - torch ; extra == 'docs' requires_python: '>=3.9' +- pypi: https://files.pythonhosted.org/packages/9c/1a/4834b1f2fb1847412353d7342eb7a1d001a4f3bd9d24155e057135a4aa44/optree-0.19.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl + name: optree + version: 0.19.1 + sha256: 3d0e1493429ae1d1a5e34855774ee604c974a8f76656bd0e562cdbf9466c9b1f + requires_dist: + - typing-extensions>=4.6.0 + - typing-extensions>=4.12.0 ; python_full_version >= '3.13' + - attrs ; extra == 'attrs' + - jax ; extra == 'jax' + - numpy ; extra == 'numpy' + - torch ; extra == 'torch' + - cpplint ; extra == 'lint' + - doc8 ; extra == 'lint' + - mypy ; extra == 'lint' + - pre-commit ; extra == 'lint' + - pyenchant ; extra == 'lint' + - pylint[spelling] ; extra == 'lint' + - ruff ; extra == 'lint' + - xdoctest ; extra == 'lint' + - pytest ; extra == 'test' + - pytest-cov ; extra == 'test' + - covdefaults ; extra == 'test' + - rich ; extra == 'test' + - typing-extensions==4.6.0 ; python_full_version < '3.13' and sys_platform == 'linux' and extra == 'test' + - typing-extensions==4.6.0 ; python_full_version < '3.13' and sys_platform == 'darwin' and extra == 'test' + - typing-extensions==4.6.0 ; python_full_version < '3.13' and sys_platform == 'win32' and extra == 'test' + - typing-extensions==4.12.0 ; python_full_version >= '3.13' and sys_platform == 'linux' and extra == 'test' + - typing-extensions==4.12.0 ; python_full_version >= '3.13' and sys_platform == 'darwin' and extra == 'test' + - typing-extensions==4.12.0 ; python_full_version >= '3.13' and sys_platform == 'win32' and extra == 'test' + - sphinx~=8.0 ; extra == 'docs' + - sphinx-autoapi ; extra == 'docs' + - sphinx-autobuild ; extra == 'docs' + - sphinx-autodoc-typehints ; extra == 'docs' + - sphinx-copybutton ; extra == 'docs' + - sphinx-rtd-theme ; extra == 'docs' + - sphinxcontrib-bibtex ; extra == 'docs' + - docutils ; extra == 'docs' + - attrs ; extra == 'docs' + - jax[cpu] ; extra == 'docs' + - numpy ; extra == 'docs' + - torch ; extra == 'docs' + requires_python: '>=3.9' - pypi: https://files.pythonhosted.org/packages/12/df/172771902943af54bf661a8d102bdf2e7f932127968080632bda6054b62c/orjson-3.11.7-cp314-cp314-win_amd64.whl name: orjson version: 3.11.7 @@ -8299,6 +11168,11 @@ packages: version: 3.11.7 sha256: de0a37f21d0d364954ad5de1970491d7fbd0fb1ef7417d4d56a36dc01ba0c0a0 requires_python: '>=3.10' +- pypi: https://files.pythonhosted.org/packages/76/3e/c0b690253f0b82d86e99949af13533363acfb5432ecb5d53dd5b3bce9c34/orjson-3.11.9-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl + name: orjson + version: 3.11.9 + sha256: aaea64f3f467d22e70eeed68bdccb3bc4f83f650446c4a03c59f2cba28a108db + requires_python: '>=3.10' - conda: https://conda.anaconda.org/conda-forge/noarch/overrides-7.7.0-pyhd8ed1ab_1.conda sha256: 1840bd90d25d4930d60f57b4f38d4e0ae3f5b8db2819638709c36098c6ba770c md5: e51f1e4089cad105b6cac64bd8166587 @@ -8336,6 +11210,18 @@ packages: - pkg:pypi/packaging?source=compressed-mapping size: 72010 timestamp: 1769093650580 +- conda: https://conda.anaconda.org/conda-forge/noarch/packaging-26.2-pyhc364b38_0.conda + sha256: 3906abfb6511a3bb309e39b9b1b7bc38f50a723971de2395489fd1f379255890 + md5: 4c06a92e74452cfa53623a81592e8934 + depends: + - python >=3.8 + - python + license: Apache-2.0 + license_family: APACHE + purls: + - pkg:pypi/packaging?source=hash-mapping + size: 91574 + timestamp: 1777103621679 - pypi: 
https://files.pythonhosted.org/packages/09/f8/8ce132104074f977f907442790eaae24e27bce3b3b454e82faa3237ff098/pandas-3.0.1-cp314-cp314-win_amd64.whl name: pandas version: 3.0.1 @@ -8606,6 +11492,96 @@ packages: - xlsxwriter>=3.2.0 ; extra == 'all' - zstandard>=0.23.0 ; extra == 'all' requires_python: '>=3.11' +- pypi: https://files.pythonhosted.org/packages/15/88/3cdd54fa279341afa10acf8d2b503556b1375245dccc9315659f795dd2e9/pandas-3.0.2-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl + name: pandas + version: 3.0.2 + sha256: deeca1b5a931fdf0c2212c8a659ade6d3b1edc21f0914ce71ef24456ca7a6535 + requires_dist: + - numpy>=1.26.0 ; python_full_version < '3.14' + - numpy>=2.3.3 ; python_full_version >= '3.14' + - python-dateutil>=2.8.2 + - tzdata ; sys_platform == 'win32' + - tzdata ; sys_platform == 'emscripten' + - hypothesis>=6.116.0 ; extra == 'test' + - pytest>=8.3.4 ; extra == 'test' + - pytest-xdist>=3.6.1 ; extra == 'test' + - pyarrow>=13.0.0 ; extra == 'pyarrow' + - bottleneck>=1.4.2 ; extra == 'performance' + - numba>=0.60.0 ; extra == 'performance' + - numexpr>=2.10.2 ; extra == 'performance' + - scipy>=1.14.1 ; extra == 'computation' + - xarray>=2024.10.0 ; extra == 'computation' + - fsspec>=2024.10.0 ; extra == 'fss' + - s3fs>=2024.10.0 ; extra == 'aws' + - gcsfs>=2024.10.0 ; extra == 'gcp' + - odfpy>=1.4.1 ; extra == 'excel' + - openpyxl>=3.1.5 ; extra == 'excel' + - python-calamine>=0.3.0 ; extra == 'excel' + - pyxlsb>=1.0.10 ; extra == 'excel' + - xlrd>=2.0.1 ; extra == 'excel' + - xlsxwriter>=3.2.0 ; extra == 'excel' + - pyarrow>=13.0.0 ; extra == 'parquet' + - pyarrow>=13.0.0 ; extra == 'feather' + - pyiceberg>=0.8.1 ; extra == 'iceberg' + - tables>=3.10.1 ; extra == 'hdf5' + - pyreadstat>=1.2.8 ; extra == 'spss' + - sqlalchemy>=2.0.36 ; extra == 'postgresql' + - psycopg2>=2.9.10 ; extra == 'postgresql' + - adbc-driver-postgresql>=1.2.0 ; extra == 'postgresql' + - sqlalchemy>=2.0.36 ; extra == 'mysql' + - pymysql>=1.1.1 ; extra == 'mysql' + - sqlalchemy>=2.0.36 ; extra == 'sql-other' + - adbc-driver-postgresql>=1.2.0 ; extra == 'sql-other' + - adbc-driver-sqlite>=1.2.0 ; extra == 'sql-other' + - beautifulsoup4>=4.12.3 ; extra == 'html' + - html5lib>=1.1 ; extra == 'html' + - lxml>=5.3.0 ; extra == 'html' + - lxml>=5.3.0 ; extra == 'xml' + - matplotlib>=3.9.3 ; extra == 'plot' + - jinja2>=3.1.5 ; extra == 'output-formatting' + - tabulate>=0.9.0 ; extra == 'output-formatting' + - pyqt5>=5.15.9 ; extra == 'clipboard' + - qtpy>=2.4.2 ; extra == 'clipboard' + - zstandard>=0.23.0 ; extra == 'compression' + - pytz>=2024.2 ; extra == 'timezone' + - adbc-driver-postgresql>=1.2.0 ; extra == 'all' + - adbc-driver-sqlite>=1.2.0 ; extra == 'all' + - beautifulsoup4>=4.12.3 ; extra == 'all' + - bottleneck>=1.4.2 ; extra == 'all' + - fastparquet>=2024.11.0 ; extra == 'all' + - fsspec>=2024.10.0 ; extra == 'all' + - gcsfs>=2024.10.0 ; extra == 'all' + - html5lib>=1.1 ; extra == 'all' + - hypothesis>=6.116.0 ; extra == 'all' + - jinja2>=3.1.5 ; extra == 'all' + - lxml>=5.3.0 ; extra == 'all' + - matplotlib>=3.9.3 ; extra == 'all' + - numba>=0.60.0 ; extra == 'all' + - numexpr>=2.10.2 ; extra == 'all' + - odfpy>=1.4.1 ; extra == 'all' + - openpyxl>=3.1.5 ; extra == 'all' + - psycopg2>=2.9.10 ; extra == 'all' + - pyarrow>=13.0.0 ; extra == 'all' + - pyiceberg>=0.8.1 ; extra == 'all' + - pymysql>=1.1.1 ; extra == 'all' + - pyqt5>=5.15.9 ; extra == 'all' + - pyreadstat>=1.2.8 ; extra == 'all' + - pytest>=8.3.4 ; extra == 'all' + - pytest-xdist>=3.6.1 ; extra == 'all' + - python-calamine>=0.3.0 
; extra == 'all' + - pytz>=2024.2 ; extra == 'all' + - pyxlsb>=1.0.10 ; extra == 'all' + - qtpy>=2.4.2 ; extra == 'all' + - scipy>=1.14.1 ; extra == 'all' + - s3fs>=2024.10.0 ; extra == 'all' + - sqlalchemy>=2.0.36 ; extra == 'all' + - tables>=3.10.1 ; extra == 'all' + - tabulate>=0.9.0 ; extra == 'all' + - xarray>=2024.10.0 ; extra == 'all' + - xlrd>=2.0.1 ; extra == 'all' + - xlsxwriter>=3.2.0 ; extra == 'all' + - zstandard>=0.23.0 ; extra == 'all' + requires_python: '>=3.11' - pypi: https://files.pythonhosted.org/packages/7c/2f/f91e4eee21585ff548e83358332d5632ee49f6b2dcd96cb5dca4e0468951/pandas_stubs-3.0.0.260204-py3-none-any.whl name: pandas-stubs version: 3.0.0.260204 @@ -8636,6 +11612,18 @@ packages: - pkg:pypi/parso?source=hash-mapping size: 82287 timestamp: 1770676243987 +- conda: https://conda.anaconda.org/conda-forge/noarch/parso-0.8.7-pyhcf101f3_0.conda + sha256: 611882f7944b467281c46644ffde6c5145d1a7730388bcde26e7e86819b0998e + md5: 39894c952938276405a1bd30e4ce2caf + depends: + - python >=3.10 + - python + license: MIT + license_family: MIT + purls: + - pkg:pypi/parso?source=hash-mapping + size: 82472 + timestamp: 1777722955579 - pypi: https://files.pythonhosted.org/packages/b1/29/c028a0731e202035f0e2e0bfbf1a3e46ad6c628cbb17f6f1cc9eea5d9ff1/pathlib_abc-0.5.2-py3-none-any.whl name: pathlib-abc version: 0.5.2 @@ -8684,6 +11672,29 @@ packages: - pkg:pypi/pillow?source=hash-mapping size: 1073026 timestamp: 1770794002408 +- conda: https://conda.anaconda.org/conda-forge/linux-64/pillow-12.2.0-py314h8ec4b1a_0.conda + sha256: 123d8a7c16c88658b4f29e9f115a047598c941708dade74fbaff373a32dbec5e + md5: 76c4757c0ec9d11f969e8eb44899307b + depends: + - python + - libgcc >=14 + - __glibc >=2.17,<3.0.a0 + - libtiff >=4.7.1,<4.8.0a0 + - openjpeg >=2.5.4,<3.0a0 + - libxcb >=1.17.0,<2.0a0 + - libwebp-base >=1.6.0,<2.0a0 + - zlib-ng >=2.3.3,<2.4.0a0 + - libjpeg-turbo >=3.1.2,<4.0a0 + - python_abi 3.14.* *_cp314 + - libfreetype >=2.14.3 + - libfreetype6 >=2.14.3 + - lcms2 >=2.18,<3.0a0 + - tk >=8.6.13,<8.7.0a0 + license: HPND + purls: + - pkg:pypi/pillow?source=hash-mapping + size: 1082797 + timestamp: 1775060059882 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pillow-12.1.1-py314hab283cf_0.conda sha256: 1659ff6e8ea6170a90fb8eb7291990d12bba270aab18176defa0717ed34ce186 md5: bcb38a8005e93a3b240a0dbcf28df87a @@ -8743,6 +11754,18 @@ packages: - pkg:pypi/platformdirs?source=compressed-mapping size: 25646 timestamp: 1773199142345 +- conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.9.6-pyhcf101f3_0.conda + sha256: 8f29915c172f1f7f4f7c9391cd5dac3ebf5d13745c8b7c8006032615246345a5 + md5: 89c0b6d1793601a2a3a3f7d2d3d8b937 + depends: + - python >=3.10 + - python + license: MIT + license_family: MIT + purls: + - pkg:pypi/platformdirs?source=hash-mapping + size: 25862 + timestamp: 1775741140609 - pypi: https://files.pythonhosted.org/packages/52/d2/c6e44dba74f17c6216ce1b56044a9b93a929f1c2d5bdaff892512b260f5e/plotly-6.6.0-py3-none-any.whl name: plotly version: 6.6.0 @@ -8783,6 +11806,78 @@ packages: - xarray ; extra == 'dev-optional' - plotly[dev-optional] ; extra == 'dev' requires_python: '>=3.8' +- pypi: https://files.pythonhosted.org/packages/90/ad/cba91b3bcf04073e4d1655a5c1710ef3f457f56f7d1b79dcc3d72f4dd912/plotly-6.7.0-py3-none-any.whl + name: plotly + version: 6.7.0 + sha256: ac8aca1c25c663a59b5b9140a549264a5badde2e057d79b8c772ae2920e32ff0 + requires_dist: + - narwhals>=1.15.1 + - packaging + - anywidget ; extra == 'dev' + - build ; extra == 'dev' + - colorcet ; extra == 'dev' + - 
fiona<=1.9.6 ; python_full_version < '3.9' and extra == 'dev' + - geopandas ; extra == 'dev' + - inflect ; extra == 'dev' + - jupyterlab ; extra == 'dev' + - kaleido>=1.1.0 ; extra == 'dev' + - numpy>=1.22 ; extra == 'dev' + - orjson ; extra == 'dev' + - pandas ; extra == 'dev' + - pdfrw ; extra == 'dev' + - pillow ; extra == 'dev' + - plotly-geo ; extra == 'dev' + - polars[timezone] ; extra == 'dev' + - pyarrow ; extra == 'dev' + - pyshp ; extra == 'dev' + - pytest ; extra == 'dev' + - pytz ; extra == 'dev' + - requests ; extra == 'dev' + - ruff==0.11.12 ; extra == 'dev' + - scikit-image ; extra == 'dev' + - scipy ; extra == 'dev' + - shapely ; extra == 'dev' + - statsmodels ; extra == 'dev' + - vaex ; python_full_version < '3.10' and extra == 'dev' + - xarray ; extra == 'dev' + - build ; extra == 'dev-build' + - jupyterlab ; extra == 'dev-build' + - pytest ; extra == 'dev-build' + - requests ; extra == 'dev-build' + - ruff==0.11.12 ; extra == 'dev-build' + - pytest ; extra == 'dev-core' + - requests ; extra == 'dev-core' + - ruff==0.11.12 ; extra == 'dev-core' + - anywidget ; extra == 'dev-optional' + - build ; extra == 'dev-optional' + - colorcet ; extra == 'dev-optional' + - fiona<=1.9.6 ; python_full_version < '3.9' and extra == 'dev-optional' + - geopandas ; extra == 'dev-optional' + - inflect ; extra == 'dev-optional' + - jupyterlab ; extra == 'dev-optional' + - kaleido>=1.1.0 ; extra == 'dev-optional' + - numpy>=1.22 ; extra == 'dev-optional' + - orjson ; extra == 'dev-optional' + - pandas ; extra == 'dev-optional' + - pdfrw ; extra == 'dev-optional' + - pillow ; extra == 'dev-optional' + - plotly-geo ; extra == 'dev-optional' + - polars[timezone] ; extra == 'dev-optional' + - pyarrow ; extra == 'dev-optional' + - pyshp ; extra == 'dev-optional' + - pytest ; extra == 'dev-optional' + - pytz ; extra == 'dev-optional' + - requests ; extra == 'dev-optional' + - ruff==0.11.12 ; extra == 'dev-optional' + - scikit-image ; extra == 'dev-optional' + - scipy ; extra == 'dev-optional' + - shapely ; extra == 'dev-optional' + - statsmodels ; extra == 'dev-optional' + - vaex ; python_full_version < '3.10' and extra == 'dev-optional' + - xarray ; extra == 'dev-optional' + - numpy>=1.22 ; extra == 'express' + - kaleido>=1.1.0 ; extra == 'kaleido' + requires_python: '>=3.8' - pypi: https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl name: pluggy version: 1.6.0 @@ -8806,6 +11901,19 @@ packages: - pkg:pypi/pluggy?source=compressed-mapping size: 25877 timestamp: 1764896838868 +- conda: https://conda.anaconda.org/conda-forge/linux-64/prek-0.3.13-hb17b654_0.conda + sha256: d0f4f26f16e3fc61cad88e341adf7fda8a619a68dc0afbcdfe65571d22aaa5c7 + md5: 605c9bd0d875ad759b4ea1f785f7ae70 + depends: + - libgcc >=14 + - __glibc >=2.17,<3.0.a0 + constrains: + - __glibc >=2.17 + license: MIT + license_family: MIT + purls: [] + size: 5916663 + timestamp: 1778015597635 - conda: https://conda.anaconda.org/conda-forge/linux-64/prek-0.3.6-hb17b654_0.conda sha256: a26627790776987421ecb130240dfd5c26e706d6811e173f7bdf3029bec13e1e md5: 903cc9fafd676d3c13d9c1e71a52231a @@ -8854,6 +11962,17 @@ packages: - pkg:pypi/prometheus-client?source=compressed-mapping size: 56634 timestamp: 1768476602855 +- conda: https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.25.0-pyhd8ed1ab_0.conda + sha256: 4d7ec90d4f9c1f3b4a50623fefe4ebba69f651b102b373f7c0e9dbbfa43d495c + md5: a11ab1f31af799dd93c3a39881528884 + depends: + - python >=3.10 + license: 
Apache-2.0 + license_family: Apache + purls: + - pkg:pypi/prometheus-client?source=hash-mapping + size: 57113 + timestamp: 1775771465170 - conda: https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.52-pyha770c72_0.conda sha256: 4817651a276016f3838957bfdf963386438c70761e9faec7749d411635979bae md5: edb16f14d920fb3faf17f5ce582942d6 @@ -8999,6 +12118,17 @@ packages: - pkg:pypi/pygments?source=hash-mapping size: 889287 timestamp: 1750615908735 +- conda: https://conda.anaconda.org/conda-forge/noarch/pygments-2.20.0-pyhd8ed1ab_0.conda + sha256: cf70b2f5ad9ae472b71235e5c8a736c9316df3705746de419b59d442e8348e86 + md5: 16c18772b340887160c79a6acc022db0 + depends: + - python >=3.10 + license: BSD-2-Clause + license_family: BSD + purls: + - pkg:pypi/pygments?source=hash-mapping + size: 893031 + timestamp: 1774796815820 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyobjc-core-12.1-py314h3a4d195_0.conda sha256: df5af268c5a74b7160d772c263ece6f43257faff571783443e34b5f1d5a61cf2 md5: 75a84fc8337557347252cc4fd3ba2a93 @@ -9081,13 +12211,31 @@ packages: timestamp: 1733217331982 - pypi: https://files.pythonhosted.org/packages/80/b2/bba963dfce0fcbc5020a4f8b4361e132390c4bd78b46cfc7ae355e678b96/pytask-0.5.8-py3-none-any.whl name: pytask - version: 0.5.8 - sha256: 217ed6b3e12140c442afa5e333bbb91e98f8d4fd5c746d323fbbf9778985a92f + version: 0.5.8 + sha256: 217ed6b3e12140c442afa5e333bbb91e98f8d4fd5c746d323fbbf9778985a92f + requires_dist: + - attrs>=21.3.0 + - click>=8.1.8,!=8.2.0 + - click-default-group>=1.2.4 + - networkx>=2.4.0 + - optree>=0.9.0 + - packaging>=23.0.0 + - pluggy>=1.3.0 + - rich>=13.8.0 + - sqlalchemy>=2.0.31 + - tomli>=1 ; python_full_version < '3.11' + - typing-extensions>=4.8.0 ; python_full_version < '3.11' + - universal-pathlib>=0.2.2 + requires_python: '>=3.10' +- pypi: https://files.pythonhosted.org/packages/d7/54/c30cb1d08258612ece1dfa72c6918998bebecb916c54fca6d806bc780f2b/pytask-0.6.0-py3-none-any.whl + name: pytask + version: 0.6.0 + sha256: cc4c31ead39f5c64be037640f7bf589b68bd0e87ea9e1a049ba86ceab42c9d13 requires_dist: - - attrs>=21.3.0 - click>=8.1.8,!=8.2.0 - click-default-group>=1.2.4 - - networkx>=2.4.0 + - msgspec>=0.18.6 + - msgspec[toml]>=0.18.6 - optree>=0.9.0 - packaging>=23.0.0 - pluggy>=1.3.0 @@ -9096,6 +12244,7 @@ packages: - tomli>=1 ; python_full_version < '3.11' - typing-extensions>=4.8.0 ; python_full_version < '3.11' - universal-pathlib>=0.2.2 + - networkx>=2.4.0 ; extra == 'dag' requires_python: '>=3.10' - pypi: https://files.pythonhosted.org/packages/88/b9/19ecce5c57114b703b97378ded69ffde1f9f9d471d4db361bbfa6105861e/pytask_parallel-0.5.2-py3-none-any.whl name: pytask-parallel @@ -9110,6 +12259,19 @@ packages: - pytask>=0.5.2 - rich requires_python: '>=3.10' +- pypi: https://files.pythonhosted.org/packages/5b/f2/44a7dd795a52d34d033b1cb1a6b1162eada650079e557e236fb6b88943be/pytask_parallel-0.5.4-py3-none-any.whl + name: pytask-parallel + version: 0.5.4 + sha256: f05ca8e3251e25621b260659e01bceec43875155afa3dab84a912e1bbd9971d0 + requires_dist: + - attrs>=21.3.0 + - click>=8.1.8,!=8.2.0 + - cloudpickle + - loky + - pluggy>=1.0.0 + - pytask>=0.5.2 + - rich + requires_python: '>=3.10' - pypi: https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl name: pytest version: 9.0.2 @@ -9151,6 +12313,27 @@ packages: - pkg:pypi/pytest?source=hash-mapping size: 299581 timestamp: 1765062031645 +- conda: https://conda.anaconda.org/conda-forge/noarch/pytest-9.0.3-pyhc364b38_1.conda + 
sha256: 960f59442173eee0731906a9077bd5ccf60f4b4226f05a22d1728ab9a21a879c + md5: 6a991452eadf2771952f39d43615bb3e + depends: + - colorama >=0.4 + - pygments >=2.7.2 + - python >=3.10 + - iniconfig >=1.0.1 + - packaging >=22 + - pluggy >=1.5,<2 + - tomli >=1 + - exceptiongroup >=1 + - python + constrains: + - pytest-faulthandler >=2 + license: MIT + license_family: MIT + purls: + - pkg:pypi/pytest?source=hash-mapping + size: 299984 + timestamp: 1775644472530 - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-cov-7.0.0-pyhcf101f3_1.conda sha256: d0f45586aad48ef604590188c33c83d76e4fc6370ac569ba0900906b24fd6a26 md5: 6891acad5e136cb62a8c2ed2679d6528 @@ -9166,6 +12349,21 @@ packages: - pkg:pypi/pytest-cov?source=hash-mapping size: 29016 timestamp: 1757612051022 +- conda: https://conda.anaconda.org/conda-forge/noarch/pytest-cov-7.1.0-pyhcf101f3_0.conda + sha256: 44e42919397bd00bfaa47358a6ca93d4c21493a8c18600176212ec21a8d25ca5 + md5: 67d1790eefa81ed305b89d8e314c7923 + depends: + - coverage >=7.10.6 + - pluggy >=1.2 + - pytest >=7 + - python >=3.10 + - python + license: MIT + license_family: MIT + purls: + - pkg:pypi/pytest-cov?source=hash-mapping + size: 29559 + timestamp: 1774139250481 - conda: https://conda.anaconda.org/conda-forge/noarch/pytest-memray-1.8.0-pyhd8ed1ab_0.conda sha256: 03f9bc063bf51454bfcad859918dbb08673fb848d8d7b12f1b8130fa59fec9fa md5: 8ec1201026003252fb9a9c1c67c10ebf @@ -9229,6 +12427,34 @@ packages: size: 36702440 timestamp: 1770675584356 python_site_packages_path: lib/python3.14/site-packages +- conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.14.4-habeac84_100_cp314.conda + build_number: 100 + sha256: dec247c5badc811baa34d6085df9d0465535883cf745e22e8d79092ad54a3a7b + md5: a443f87920815d41bfe611296e507995 + depends: + - __glibc >=2.17,<3.0.a0 + - bzip2 >=1.0.8,<2.0a0 + - ld_impl_linux-64 >=2.36.1 + - libexpat >=2.7.5,<3.0a0 + - libffi >=3.5.2,<3.6.0a0 + - libgcc >=14 + - liblzma >=5.8.2,<6.0a0 + - libmpdec >=4.0.0,<5.0a0 + - libsqlite >=3.52.0,<4.0a0 + - libuuid >=2.42,<3.0a0 + - libzlib >=1.3.2,<2.0a0 + - ncurses >=6.5,<7.0a0 + - openssl >=3.5.6,<4.0a0 + - python_abi 3.14.* *_cp314 + - readline >=8.3,<9.0a0 + - tk >=8.6.13,<8.7.0a0 + - tzdata + - zstd >=1.5.7,<1.6.0a0 + license: Python-2.0 + purls: [] + size: 36705460 + timestamp: 1775614357822 + python_site_packages_path: lib/python3.14/site-packages - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.14.3-h4c637c5_101_cp314.conda build_number: 101 sha256: fccce2af62d11328d232df9f6bbf63464fd45f81f718c661757f9c628c4378ce @@ -9314,6 +12540,16 @@ packages: purls: [] size: 50062 timestamp: 1770674497152 +- conda: https://conda.anaconda.org/conda-forge/noarch/python-gil-3.14.4-h4df99d1_100.conda + sha256: 36ff7984e4565c85149e64f8206303d412a0652e55cf806dcb856903fa056314 + md5: e4e60721757979d01d3964122f674959 + depends: + - cpython 3.14.4.* + - python_abi * *_cp314 + license: Python-2.0 + purls: [] + size: 49806 + timestamp: 1775614307464 - conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-2.0.7-pyhd8ed1ab_0.conda sha256: 4790787fe1f4e8da616edca4acf6a4f8ed4e7c6967aa31b920208fc8f95efcca md5: a61bf9ec79426938ff785eb69dbb1960 @@ -9325,6 +12561,18 @@ packages: - pkg:pypi/python-json-logger?source=hash-mapping size: 13383 timestamp: 1677079727691 +- conda: https://conda.anaconda.org/conda-forge/noarch/python-json-logger-3.2.1-pyh332efcf_0.conda + sha256: 1c55116c22512cef7b01d55ae49697707f2c1fd829407183c19817e2d300fd8d + md5: 1cd2f3e885162ee1366312bd1b1677fd + depends: + - 
python >=3.10 + - typing_extensions + license: BSD-2-Clause + license_family: BSD + purls: + - pkg:pypi/python-json-logger?source=hash-mapping + size: 18969 + timestamp: 1777318679482 - conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2025.3-pyhd8ed1ab_0.conda sha256: 467134ef39f0af2dbb57d78cb3e4821f01003488d331a8dd7119334f4f47bfbd md5: 7ead57407430ba33f681738905278d03 @@ -9336,6 +12584,17 @@ packages: - pkg:pypi/tzdata?source=compressed-mapping size: 143542 timestamp: 1765719982349 +- conda: https://conda.anaconda.org/conda-forge/noarch/python-tzdata-2026.2-pyhd8ed1ab_0.conda + sha256: e943f9c15a6bdba2e1b9f423ab913b3f6b02197b0ef9f8e6b7464d78b59965b9 + md5: f6ad7450fc21e00ecc23812baed6d2e4 + depends: + - python >=3.10 + license: Apache-2.0 + license_family: APACHE + purls: + - pkg:pypi/tzdata?source=hash-mapping + size: 146639 + timestamp: 1777068997932 - conda: https://conda.anaconda.org/conda-forge/noarch/python_abi-3.14-8_cp314.conda build_number: 8 sha256: ad6d2e9ac39751cc0529dd1566a26751a0bf2542adb0c232533d32e176e21db5 @@ -9568,6 +12827,24 @@ packages: - pkg:pypi/requests?source=compressed-mapping size: 63602 timestamp: 1766926974520 +- conda: https://conda.anaconda.org/conda-forge/noarch/requests-2.33.1-pyhcf101f3_1.conda + sha256: 7f2c24dd3bd3c104a1d2c9a10ead5ed6758b0976b74f972cfe9c19884ccc4241 + md5: 9659f587a8ceacc21864260acd02fc67 + depends: + - python >=3.10 + - certifi >=2023.5.7 + - charset-normalizer >=2,<4 + - idna >=2.5,<4 + - urllib3 >=1.26,<3 + - python + constrains: + - chardet >=3.0.2,<8 + license: Apache-2.0 + license_family: APACHE + purls: + - pkg:pypi/requests?source=hash-mapping + size: 63728 + timestamp: 1777030058920 - conda: https://conda.anaconda.org/conda-forge/noarch/rfc3339-validator-0.1.4-pyhd8ed1ab_1.conda sha256: 2e4372f600490a6e0b3bac60717278448e323cab1c0fecd5f43f7c56535a99c5 md5: 36de09a8d3e5d5e6f4ee63af49e59706 @@ -9628,6 +12905,21 @@ packages: - pkg:pypi/rich?source=compressed-mapping size: 208472 timestamp: 1771572730357 +- conda: https://conda.anaconda.org/conda-forge/noarch/rich-15.0.0-pyhcf101f3_0.conda + sha256: 3d6ba2c0fcdac3196ba2f0615b4104e532525ffa1335b50a2878be5ff488814a + md5: 0242025a3c804966bf71aa04eee82f66 + depends: + - markdown-it-py >=2.2.0 + - pygments >=2.13.0,<3.0.0 + - python >=3.10 + - typing_extensions >=4.0.0,<5.0.0 + - python + license: MIT + license_family: MIT + purls: + - pkg:pypi/rich?source=hash-mapping + size: 208577 + timestamp: 1775991661559 - conda: https://conda.anaconda.org/conda-forge/linux-64/rpds-py-0.30.0-py314h2e6c369_0.conda sha256: e53b0cbf3b324eaa03ca1fe1a688fdf4ab42cea9c25270b0a7307d8aaaa4f446 md5: c1c368b5437b0d1a68f372ccf01cb133 @@ -9675,6 +12967,92 @@ packages: - pkg:pypi/rpds-py?source=hash-mapping size: 235780 timestamp: 1764543046065 +- conda: https://conda.anaconda.org/conda-forge/linux-64/s2n-1.7.1-h1cbb8d7_1.conda + sha256: dbbe4ab36b90427f12d69fc14a8b601b6bca4185c6c4dd67b8046a8da9daec03 + md5: 9d978822b57bafe72ebd3f8b527bba71 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + - openssl >=3.5.5,<4.0a0 + license: Apache-2.0 + license_family: Apache + purls: [] + size: 395083 + timestamp: 1773251675551 +- conda: https://conda.anaconda.org/conda-forge/linux-64/s2n-1.7.2-hc5a330e_1.conda + sha256: 856866fd519b812db3e092aba308248dd87b5c308186fcffe593f309373ae94c + md5: 3f578c7d2b0bb52469340e4060d48d94 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + - openssl >=3.5.6,<4.0a0 + license: Apache-2.0 + license_family: Apache + purls: [] + size: 387306 + timestamp: 1777466173323 
+- conda: https://conda.anaconda.org/conda-forge/linux-64/scikit-learn-1.8.0-np2py314hf09ca88_1.conda + sha256: bcf374fe61712928c624f410a831e9f2a36ad13429f598e6028203588d24b914 + md5: c9d90e43202c721281f3d74129223515 + depends: + - python + - numpy >=1.24.1 + - scipy >=1.10.0 + - joblib >=1.3.0 + - threadpoolctl >=3.2.0 + - libstdcxx >=14 + - libgcc >=14 + - __glibc >=2.17,<3.0.a0 + - _openmp_mutex >=4.5 + - python_abi 3.14.* *_cp314 + - numpy >=1.23,<3 + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/scikit-learn?source=hash-mapping + size: 9992698 + timestamp: 1765801260253 +- conda: https://conda.anaconda.org/conda-forge/osx-arm64/scikit-learn-1.8.0-np2py314h15f0f0f_1.conda + sha256: 3b30f332fb87598de8c31a3cbec1bc79b926bcc6f535bda10054721a96c256dc + md5: d9bc75bfda103e05a55e4034fded8ddf + depends: + - python + - numpy >=1.24.1 + - scipy >=1.10.0 + - joblib >=1.3.0 + - threadpoolctl >=3.2.0 + - llvm-openmp >=19.1.7 + - python 3.14.* *_cp314 + - __osx >=11.0 + - libcxx >=19 + - python_abi 3.14.* *_cp314 + - numpy >=1.23,<3 + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/scikit-learn?source=hash-mapping + size: 9383244 + timestamp: 1766550871162 +- conda: https://conda.anaconda.org/conda-forge/win-64/scikit-learn-1.8.0-np2py314h1b5b07a_1.conda + sha256: ce701fcf35e0b65d0822fe916f5536ed326c1b842fe1ba6d08c5fcac4ec8dc75 + md5: ba2216c82d626684433912bfec8a4843 + depends: + - python + - numpy >=1.24.1 + - scipy >=1.10.0 + - joblib >=1.3.0 + - threadpoolctl >=3.2.0 + - vc >=14.3,<15 + - vc14_runtime >=14.44.35208 + - ucrt >=10.0.20348.0 + - python_abi 3.14.* *_cp314 + - numpy >=1.23,<3 + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/scikit-learn?source=hash-mapping + size: 9139165 + timestamp: 1765801295593 - conda: https://conda.anaconda.org/conda-forge/linux-64/scipy-1.17.1-py314hf07bd8e_0.conda sha256: 1ae427836d7979779c9005388a05993a3addabcc66c4422694639a4272d7d972 md5: d0510124f87c75403090e220db1e9d41 @@ -9799,6 +13177,11 @@ packages: version: 3.20.2 sha256: 3b6bb7fb96efd673eac2e4235200bfffdc2353ad12c54117e1e4e2fc485ac017 requires_python: '>=2.5,!=3.0.*,!=3.1.*,!=3.2.*' +- pypi: https://files.pythonhosted.org/packages/78/91/3635cdb13318cb0a328abaa69e2b91251caad39d6779aa308098f341f6cb/simplejson-4.1.1-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl + name: simplejson + version: 4.1.1 + sha256: 3851658d642c1184d2023f0e6c9ce44a21eb1629e74e7c84ef956b128841fe12 + requires_python: '>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*' - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhe01879c_1.conda sha256: 458227f759d5e3fcec5d9b7acce54e10c9e1f4f4b7ec978f3bfd54ce4ee9853d md5: 3339e3b65d58accf4ca4fb8748ab16b3 @@ -9813,8 +13196,8 @@ packages: timestamp: 1753199211006 - pypi: ./ name: skillmodels - version: 0.0.24.dev242+g859d7ecae - sha256: 4f390213c39753657a315f50f7597b22802fe529305722f787011d8fd2e88b1c + version: 0.0.24.dev317+g052e5e40a.d20260511 + sha256: ebba64951651eed4efad4074de95ecca839d30113f9e1ae2a700b023ac1252e9 requires_dist: - dags>=0.5.1 - jax>=0.9 @@ -9826,6 +13209,7 @@ packages: - plotly>=6.6 - pytask-parallel>=0.5.2 - pytask>=0.5.8 + - scikit-learn>=1.5 requires_python: '>=3.14,<3.15' - conda: https://conda.anaconda.org/conda-forge/noarch/snakeviz-2.2.2-pyhd8ed1ab_1.conda sha256: 833326122c18887b338262c13365cb146b6702c79d72da74a1c6b8af4c50e162 @@ -9975,6 +13359,44 @@ packages: - typing-extensions!=3.10.0.1 ; extra == 'aiosqlite' - sqlcipher3-binary ; extra == 
'sqlcipher' requires_python: '>=3.7' +- pypi: https://files.pythonhosted.org/packages/2e/84/efc7c0bf3a1c5eef81d397f6fddac855becdbb11cb38ff957888603014a7/sqlalchemy-2.0.49-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl + name: sqlalchemy + version: 2.0.49 + sha256: 685e93e9c8f399b0c96a624799820176312f5ceef958c0f88215af4013d29066 + requires_dist: + - importlib-metadata ; python_full_version < '3.8' + - greenlet>=1 ; platform_machine == 'AMD64' or platform_machine == 'WIN32' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'ppc64le' or platform_machine == 'win32' or platform_machine == 'x86_64' + - typing-extensions>=4.6.0 + - greenlet>=1 ; extra == 'asyncio' + - mypy>=0.910 ; extra == 'mypy' + - pyodbc ; extra == 'mssql' + - pymssql ; extra == 'mssql-pymssql' + - pyodbc ; extra == 'mssql-pyodbc' + - mysqlclient>=1.4.0 ; extra == 'mysql' + - mysql-connector-python ; extra == 'mysql-connector' + - mariadb>=1.0.1,!=1.1.2,!=1.1.5,!=1.1.10 ; extra == 'mariadb-connector' + - cx-oracle>=8 ; extra == 'oracle' + - oracledb>=1.0.1 ; extra == 'oracle-oracledb' + - psycopg2>=2.7 ; extra == 'postgresql' + - pg8000>=1.29.1 ; extra == 'postgresql-pg8000' + - greenlet>=1 ; extra == 'postgresql-asyncpg' + - asyncpg ; extra == 'postgresql-asyncpg' + - psycopg2-binary ; extra == 'postgresql-psycopg2binary' + - psycopg2cffi ; extra == 'postgresql-psycopg2cffi' + - psycopg>=3.0.7 ; extra == 'postgresql-psycopg' + - psycopg[binary]>=3.0.7 ; extra == 'postgresql-psycopgbinary' + - pymysql ; extra == 'pymysql' + - greenlet>=1 ; extra == 'aiomysql' + - aiomysql>=0.2.0 ; extra == 'aiomysql' + - greenlet>=1 ; extra == 'aioodbc' + - aioodbc ; extra == 'aioodbc' + - greenlet>=1 ; extra == 'asyncmy' + - asyncmy>=0.2.3,!=0.2.4,!=0.2.6 ; extra == 'asyncmy' + - greenlet>=1 ; extra == 'aiosqlite' + - aiosqlite ; extra == 'aiosqlite' + - typing-extensions!=3.10.0.1 ; extra == 'aiosqlite' + - sqlcipher3-binary ; extra == 'sqlcipher' + requires_python: '>=3.7' - conda: https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.3-pyhd8ed1ab_1.conda sha256: 570da295d421661af487f1595045760526964f41471021056e993e73089e9c41 md5: b1b505328da7a6b246787df4b5a49fbc @@ -10008,6 +13430,13 @@ packages: requires_dist: - pyreadline3 ; sys_platform == 'win32' requires_python: '>=3.8' +- pypi: https://files.pythonhosted.org/packages/cb/fc/8c82be70b8f96d09943360f34cfb2ecdd3035294c51bce4131eeabe56645/tabcompleter-1.4.1-py3-none-any.whl + name: tabcompleter + version: 1.4.1 + sha256: 26b5cf330a48f32625b00e1664aa589f67c8e98275b6d9c2b85d19917dac1601 + requires_dist: + - pyreadline3 ; sys_platform == 'win32' + requires_python: '>=3.8' - conda: https://conda.anaconda.org/conda-forge/win-64/tbb-2022.3.0-h3155e25_2.conda sha256: abd9a489f059fba85c8ffa1abdaa4d515d6de6a3325238b8e81203b913cf65a9 md5: 0f9817ffbe25f9e69ceba5ea70c52606 @@ -10073,6 +13502,39 @@ packages: - pkg:pypi/textual?source=hash-mapping size: 528806 timestamp: 1773220924332 +- conda: https://conda.anaconda.org/conda-forge/noarch/textual-8.2.5-pyhcf101f3_0.conda + sha256: 9fb5734805d4c78d1f05c712485db2f537a933083eee70a854fb11f305da51b6 + md5: ab380da68231be1e9b10519f63a4e77e + depends: + - pygments >=2.19.2,<3.0.0 + - typing_extensions >=4.4.0,<5.0.0 + - platformdirs >=3.6.0,<5 + - python >=3.10,<4.0.0 + - markdown-it-py >=2.1.0 + - linkify-it-py >=1,<3 + - mdit-py-plugins + - rich >=14.2.0 + - python + constrains: + - tree_sitter >=0.25.0 + - tree_sitter_languages 1.10.2.* + license: MIT + license_family: MIT + 
purls: + - pkg:pypi/textual?source=hash-mapping + size: 535137 + timestamp: 1777572419169 +- conda: https://conda.anaconda.org/conda-forge/noarch/threadpoolctl-3.6.0-pyhecae5ae_0.conda + sha256: 6016672e0e72c4cf23c0cf7b1986283bd86a9c17e8d319212d78d8e9ae42fdfd + md5: 9d64911b31d57ca443e9f1e36b04385f + depends: + - python >=3.9 + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/threadpoolctl?source=hash-mapping + size: 23869 + timestamp: 1741878358548 - conda: https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.4.0-pyhd8ed1ab_0.conda sha256: cad582d6f978276522f84bd209a5ddac824742fe2d452af6acf900f8650a73a2 md5: f1acf5fdefa8300de697982bcb1761c9 @@ -10134,6 +13596,23 @@ packages: - pkg:pypi/tomli?source=compressed-mapping size: 21453 timestamp: 1768146676791 +- conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.4.1-pyhcf101f3_0.conda + sha256: 91cafdb64268e43e0e10d30bd1bef5af392e69f00edd34dfaf909f69ab2da6bd + md5: b5325cf06a000c5b14970462ff5e4d58 + depends: + - python >=3.10 + - python + license: MIT + license_family: MIT + purls: + - pkg:pypi/tomli?source=hash-mapping + size: 21561 + timestamp: 1774492402955 +- pypi: https://files.pythonhosted.org/packages/c7/18/c86eb8e0202e32dd3df50d43d7ff9854f8e0603945ff398974c1d91ac1ef/tomli_w-1.2.0-py3-none-any.whl + name: tomli-w + version: 1.2.0 + sha256: 188306098d013b691fcadc011abd66727d3c414c571bb01b1a174ba8c983cf90 + requires_python: '>=3.9' - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.3-py314h5bd0f2a_0.conda sha256: b8f9f9ae508d79c9c697eb01b6a8d2ed4bc1899370f44aa6497c8abbd15988ea md5: e35f08043f54d26a1be93fdbf90d30c3 @@ -10148,6 +13627,20 @@ packages: - pkg:pypi/tornado?source=hash-mapping size: 905436 timestamp: 1765458949518 +- conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.5.5-py314h5bd0f2a_0.conda + sha256: ed8d06093ff530a2dae9ed1e51eb6f908fbfd171e8b62f4eae782d67b420be5a + md5: dc1ff1e915ab35a06b6fa61efae73ab5 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=14 + - python >=3.14,<3.15.0a0 + - python_abi 3.14.* *_cp314 + license: Apache-2.0 + license_family: Apache + purls: + - pkg:pypi/tornado?source=hash-mapping + size: 912476 + timestamp: 1774358032579 - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tornado-6.5.4-py314h0612a62_0.conda sha256: affbc6300e1baef5848f6e69569733a3e7a118aa642487c853f53d6f2bd23b89 md5: 83e1a2d7b0c1352870bbe9d9406135cf @@ -10188,6 +13681,18 @@ packages: - pkg:pypi/traitlets?source=hash-mapping size: 110051 timestamp: 1733367480074 +- conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.15.0-pyhcf101f3_0.conda + sha256: dfb681579be59c2e790c95f7f49b7529a9b0511d6385ad276e3c8988cbd54d2c + md5: 4bada6a6d908a27262af8ebddf4f7492 + depends: + - python >=3.10 + - python + license: BSD-3-Clause + license_family: BSD + purls: + - pkg:pypi/traitlets?source=hash-mapping + size: 115165 + timestamp: 1778074251714 - pypi: https://files.pythonhosted.org/packages/0f/01/3f25909b02fac29bb0a62b2251f8d62e65d697781ffa4cf6b47a4c075c85/ty-0.0.23-py3-none-macosx_11_0_arm64.whl name: ty version: 0.0.23 @@ -10370,6 +13875,21 @@ packages: - pkg:pypi/urllib3?source=hash-mapping size: 103172 timestamp: 1767817860341 +- conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.7.0-pyhd8ed1ab_0.conda + sha256: feff959a816f7988a0893201aa9727bbb7ee1e9cec2c4f0428269b489eb93fb4 + md5: cbb88288f74dbe6ada1c6c7d0a97223e + depends: + - backports.zstd >=1.0.0 + - brotli-python >=1.2.0 + - h2 >=4,<5 + - pysocks >=1.5.6,<2.0,!=1.5.7 + - python >=3.10 + license: 
MIT + license_family: MIT + purls: + - pkg:pypi/urllib3?source=hash-mapping + size: 103560 + timestamp: 1778188657149 - conda: https://conda.anaconda.org/conda-forge/win-64/vc-14.3-h41ae7f8_34.conda sha256: 9dc40c2610a6e6727d635c62cced5ef30b7b30123f5ef67d6139e23d21744b3a md5: 1e610f2416b6acdd231c5f573d754a0f @@ -10418,6 +13938,17 @@ packages: - pkg:pypi/wcwidth?source=hash-mapping size: 71550 timestamp: 1770634638503 +- conda: https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.7.0-pyhd8ed1ab_0.conda + sha256: 1ee2d8384972ecbf8630ce8a3ea9d16858358ad3e8566675295e66996d5352da + md5: eb9538b8e55069434a18547f43b96059 + depends: + - python >=3.10 + license: MIT + license_family: MIT + purls: + - pkg:pypi/wcwidth?source=hash-mapping + size: 82917 + timestamp: 1777744489106 - conda: https://conda.anaconda.org/conda-forge/noarch/webcolors-25.10.0-pyhd8ed1ab_0.conda sha256: 21f6c8a20fe050d09bfda3fb0a9c3493936ce7d6e1b3b5f8b01319ee46d6c6f6 md5: 6639b6b0d8b5a284f027a2003669aa65 @@ -10469,6 +14000,17 @@ packages: license_family: MIT purls: [] size: 1176306 +- conda: https://conda.anaconda.org/conda-forge/noarch/xlrd-2.0.2-pyhd8ed1ab_0.conda + sha256: 64f09069d8b3a3791643230cedc80d9f9422f667e3e328b40d527375352fe8d4 + md5: 91f5637b706492b9e418da1872fd61ce + depends: + - python >=3.10 + license: BSD-3-Clause AND BSD-4-Clause + license_family: BSD + purls: + - pkg:pypi/xlrd?source=hash-mapping + size: 93671 + timestamp: 1756170155688 - conda: https://conda.anaconda.org/conda-forge/linux-64/xorg-libxau-1.0.12-hb03c661_1.conda sha256: 6bc6ab7a90a5d8ac94c7e300cc10beb0500eeba4b99822768ca2f2ef356f731b md5: b2895afaf55bf96a8c8282a2e47a5de0 @@ -10624,6 +14166,18 @@ packages: - pkg:pypi/zipp?source=hash-mapping size: 24194 timestamp: 1764460141901 +- conda: https://conda.anaconda.org/conda-forge/noarch/zipp-3.23.1-pyhcf101f3_0.conda + sha256: 523616c0530d305d2216c2b4a8dfd3872628b60083255b89c5e0d8c42e738cca + md5: e1c36c6121a7c9c76f2f148f1e83b983 + depends: + - python >=3.10 + - python + license: MIT + license_family: MIT + purls: + - pkg:pypi/zipp?source=hash-mapping + size: 24461 + timestamp: 1776131454755 - conda: https://conda.anaconda.org/conda-forge/linux-64/zlib-ng-2.3.3-hceb46e0_1.conda sha256: ea4e50c465d70236408cb0bfe0115609fd14db1adcd8bd30d8918e0291f8a75f md5: 2aadb0d17215603a82a2a6b0afd9a4cb diff --git a/pyproject.toml b/pyproject.toml index bb09d59a..dff6e04f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,6 +39,7 @@ dependencies = [ "plotly>=6.6", "pytask>=0.5.8", "pytask-parallel>=0.5.2", + "scikit-learn>=1.5", ] [[project.authors]] name = "Janoś Gabler" @@ -95,6 +96,7 @@ per-file-ignores."tests/*" = [ "FBT003", # Boolean positional value in function call "INP001", # File is part of an implicit namespace package "S101", # Use of assert detected + "T201", # print found (useful for manual inspection in long-running tests) ] pydocstyle.convention = "google" @@ -150,7 +152,11 @@ rules.unused-ignore-comment = "error" [tool.pytest] ini_options.addopts = [ "--pdbcls=pdbp:Pdb" ] ini_options.filterwarnings = [] -ini_options.markers = [ "integration: integration tests requiring MODEL2 + data" ] +ini_options.markers = [ + "integration: integration tests requiring MODEL2 + data", + "long_running: slow tests skipped in CI (run with -m long_running)", + "end_to_end: end-to-end estimation tests requiring external data", +] ini_options.norecursedirs = [ "docs" ] [tool.pixi.dependencies] @@ -162,7 +168,9 @@ networkx = "*" prek = "*" pybaum = "*" python = "~=3.14.0" +scikit-learn = "*" scipy = "*" +h5py = 
">=3.16.0,<4" [tool.pixi.environments] cuda = { features = [ "cuda" ], solve-group = "cuda" } docs = { features = [ "docs" ], solve-group = "default" } @@ -170,6 +178,7 @@ tests-cpu = { features = [ "tests" ], solve-group = "default" } tests-cuda = { features = [ "tests", "cuda" ], solve-group = "cuda" } type-checking = { features = [ "type-checking" ], solve-group = "default" } tests-cuda12 = { features = [ "tests", "cuda" ], solve-group = "cuda" } +tests-cuda13 = { features = [ "tests", "cuda13" ], solve-group = "cuda13" } [tool.pixi.feature.cuda] platforms = [ "linux-64" ] system-requirements = { cuda = "12" } @@ -182,6 +191,13 @@ mem-cuda = """\ pytest -x -s --pdb --memray --fail-on-increase \ tests/test_likelihood_regression.py::test_likelihood_contributions_large_nobs\ """ +[tool.pixi.feature.cuda13] +platforms = [ "linux-64" ] +system-requirements = { cuda = "13" } +[tool.pixi.feature.cuda13.dependencies] +cuda-nvcc = ">=13" +[tool.pixi.feature.cuda13.pypi-dependencies] +jax = { version = ">=0.9", extras = [ "cuda13" ] } [tool.pixi.feature.docs.tasks] build-docs = { cmd = "jupyter book build --html", cwd = "docs" } view-docs = { cmd = "jupyter book start", cwd = "docs" } @@ -190,10 +206,11 @@ pytest = "*" pytest-cov = "*" pytest-xdist = "*" snakeviz = "*" +xlrd = ">=2" [tool.pixi.feature.tests.target.unix.dependencies] pytest-memray = "*" [tool.pixi.feature.tests.tasks] -tests = "pytest tests" +tests = "pytest tests -m 'not long_running'" tests-with-cov = "pytest tests --cov-report=xml --cov=./" mem = """\ pytest -x -s --pdb --memray --fail-on-increase \ @@ -212,7 +229,7 @@ types-pytz = "*" [tool.pixi.feature.type-checking.tasks] ty = "ty check src tests docs" [tool.pixi.pypi-dependencies] -# optimagic = { git = "https://github.com/optimagic-dev/optimagic.git", branch = "main" } +optimagic = { git = "https://github.com/optimagic-dev/optimagic.git", branch = "probability-allow-fixed-entries" } pdbp = "*" skillmodels = { path = ".", editable = true } [tool.pixi.workspace] diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000..bf5ab88b --- /dev/null +++ b/requirements.txt @@ -0,0 +1,66 @@ +# Pip-only requirements for the skillmodels `af-estimator` branch. +# +# Installs every package needed to run the skillmodels test suite and +# the two downstream research applications (`skane-struct-bw`, +# `health-cognition`) -- minus the two applications themselves, which +# their teams provide separately. +# +# Defaults to CPU JAX. For CUDA-12, replace `jax>=0.9` with +# `jax[cuda12]>=0.9` (and provide a CUDA-12 toolkit on the host). See +# https://jax.readthedocs.io/en/latest/installation.html for details. +# +# Usage (Python 3.14 venv): +# pip install -r requirements.txt +# +# Notes on dependencies that come from the downstream apps and are NOT +# direct skillmodels dependencies are marked with `# downstream:` below. + +# Core scientific stack +numpy>=2.4 +pandas>=3 +scipy>=1.16.0 +h5py>=3.16.0,<4 +jax>=0.9 +networkx +filterpy +pybaum>=0.1.3 +scikit-learn>=1.5 # AMN Stage 1 (mixture EM) +statsmodels>=0.14.5 # downstream: regression diagnostics in skane / health-cognition +seaborn # downstream: figure styling in health-cognition + +# Estimation engine — pinned to the optimagic branch the AF estimator +# relies on (`probability-allow-fixed-entries`); the PyPI release does +# not yet carry the required `FixedConstraintWithValue` semantics. 
+optimagic @ git+https://github.com/optimagic-dev/optimagic.git@probability-allow-fixed-entries +fides>=0.7.8 # downstream: optimagic algorithm used by skane / health-cognition + +# Workflow / pipelines +dags>=0.5.1 +pytask>=0.5.8 +pytask-parallel>=0.5.2 + +# Viz + reporting +plotly>=6.6 +kaleido>=1.2 +jupyter-book>=2 +tabulate>=0.9.0 # downstream: table formatting in skane / health-cognition reports +nbformat>=5.10.4 +ipykernel>=6.29.5 +jupyterlab + +# Data / IO utilities +statadict>=1.1.0 # downstream: Stata variable labels in skane / health-cognition +deepdiff>=8.5.0 # downstream: snapshot diffing in skane / health-cognition +xlrd>=2 # required by `tests/matlab_ces_repro` (CNLSY xls reader) + +# Dev / test / profiling tooling +pytest>=8.4.1 +pytest-cov>=6.2.1 +pytest-xdist>=3.8.0 +pytest-memray; platform_system != 'Windows' +memray>=1.17.2 # downstream: heap profiling driver +snakeviz +pdbp + +# Skillmodels itself, pulled from the `af-estimator` branch. +skillmodels @ git+https://github.com/OpenSourceEconomics/skillmodels.git@af-estimator diff --git a/src/skillmodels/__init__.py b/src/skillmodels/__init__.py index c4fd82f4..ec82fe82 100644 --- a/src/skillmodels/__init__.py +++ b/src/skillmodels/__init__.py @@ -5,34 +5,60 @@ with contextlib.suppress(ImportError): import pdbp # noqa: F401 -from skillmodels.diagnostic_plots import ( +from skillmodels.af import ( + AFEstimationOptions, + AFEstimationResult, + AFInferenceResult, + compute_af_standard_errors, + estimate_af, +) +from skillmodels.amn import ( + AMNEstimationOptions, + AMNEstimationResult, + AMNInferenceResult, + compute_amn_standard_errors, + estimate_amn, +) +from skillmodels.chs import ( + create_state_ranges, + get_filtered_states, + get_maximization_inputs, +) +from skillmodels.common.diagnostic_plots import ( plot_likelihood_contributions, plot_residual_boxplots, ) -from skillmodels.filtered_states import get_filtered_states -from skillmodels.maximization_inputs import get_maximization_inputs -from skillmodels.model_spec import ( +from skillmodels.common.model_spec import ( AnchoringSpec, EstimationOptions, FactorSpec, ModelSpec, Normalizations, ) -from skillmodels.process_debug_data import create_state_ranges -from skillmodels.simulate_data import simulate_dataset, simulate_policy_effect -from skillmodels.variance_decomposition import ( +from skillmodels.common.simulate_data import simulate_dataset, simulate_policy_effect +from skillmodels.common.variance_decomposition import ( decompose_measurement_variance, summarize_measurement_reliability, ) __all__ = [ + "AFEstimationOptions", + "AFEstimationResult", + "AFInferenceResult", + "AMNEstimationOptions", + "AMNEstimationResult", + "AMNInferenceResult", "AnchoringSpec", "EstimationOptions", "FactorSpec", "ModelSpec", "Normalizations", + "compute_af_standard_errors", + "compute_amn_standard_errors", "create_state_ranges", "decompose_measurement_variance", + "estimate_af", + "estimate_amn", "get_filtered_states", "get_maximization_inputs", "plot_likelihood_contributions", diff --git a/src/skillmodels/af/__init__.py b/src/skillmodels/af/__init__.py new file mode 100644 index 00000000..319ff8a9 --- /dev/null +++ b/src/skillmodels/af/__init__.py @@ -0,0 +1,23 @@ +"""Antweiler-Freyberger estimator for latent factor models. + +Iterative period-by-period MLE with Halton quadrature for numerical +integration, following Antweiler and Freyberger (2025). 
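+
+Example (an illustrative sketch; ``model_spec`` and ``data`` are placeholders
+for a user-supplied ``ModelSpec`` and a long-format DataFrame indexed by
+(id, period))::
+
+    from skillmodels import AFEstimationOptions, estimate_af
+
+    result = estimate_af(model_spec, data, af_options=AFEstimationOptions())
+    result.all_params  # combined parameter estimates from all periods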
+""" + +from skillmodels.af.estimate import estimate_af +from skillmodels.af.inference import ( + AFInferenceResult, + compute_af_standard_errors, +) +from skillmodels.af.posterior_states import get_af_posterior_states +from skillmodels.af.types import AFEstimationOptions, AFEstimationResult, AFPeriodResult + +__all__ = [ + "AFEstimationOptions", + "AFEstimationResult", + "AFInferenceResult", + "AFPeriodResult", + "compute_af_standard_errors", + "estimate_af", + "get_af_posterior_states", +] diff --git a/src/skillmodels/af/batching.py b/src/skillmodels/af/batching.py new file mode 100644 index 00000000..72070cdf --- /dev/null +++ b/src/skillmodels/af/batching.py @@ -0,0 +1,100 @@ +"""Auto-sizing helpers for the AF likelihood's memory-aware batching. + +The AF likelihood replaces the outermost ``jax.vmap`` over observations +with ``jax.lax.map`` when ``n_obs_per_batch`` is smaller than ``n_obs``. +This module provides a simple heuristic that picks an ``n_obs_per_batch`` +from a target-bytes budget, mirroring pylcm's approach (see +``pylcm/src/lcm/simulation/initial_conditions.py:547-560``). + +The heuristic is intentionally crude: it multiplies the per-observation +Halton grid footprint by a safety factor and divides a budget (256 MB by +default, overridable via the ``SKILLMODELS_AF_TARGET_BATCH_BYTES`` +environment variable) by that product. No GPU-specific probing is done; +users who need tighter control can set ``n_obs_per_batch`` explicitly on +``AFEstimationOptions``. +""" + +import logging +import os + +_DEFAULT_TARGET_BATCH_BYTES = 2**28 # 256 MB +_ENV_VAR_TARGET = "SKILLMODELS_AF_TARGET_BATCH_BYTES" +_BYTES_PER_FLOAT64 = 8 + +# Empirical multiplier reflecting that a single observation's forward + +# backward tape at full state/shock/inv_shock resolution retains several +# copies of the integrand footprint. This sized conservatively high: a +# smaller batch is always safe, a larger batch can OOM. +_SAFETY_FACTOR = 16 + +logger = logging.getLogger(__name__) + + +def target_batch_bytes() -> int: + """Return the bytes budget per observation batch. + + Honours ``SKILLMODELS_AF_TARGET_BATCH_BYTES`` when set to a positive + integer, otherwise returns the default 256 MB budget. + """ + override = os.environ.get(_ENV_VAR_TARGET) + if override is None: + return _DEFAULT_TARGET_BATCH_BYTES + try: + parsed = int(override) + except ValueError: + logger.warning( + "Ignoring %s=%r: not a valid integer.", + _ENV_VAR_TARGET, + override, + ) + return _DEFAULT_TARGET_BATCH_BYTES + if parsed <= 0: + logger.warning("Ignoring %s=%r: must be positive.", _ENV_VAR_TARGET, override) + return _DEFAULT_TARGET_BATCH_BYTES + return parsed + + +def auto_n_obs_per_batch( + *, + n_obs: int, + n_halton_points: int, + n_halton_points_shock: int, # noqa: ARG001 + n_latent: int, + n_endogenous: int, + target_bytes: int | None = None, +) -> int: + """Pick ``n_obs_per_batch`` from a target-bytes budget. + + The AF transition-period likelihood forms a joint Halton draw of + size ``(n_halton_points, 2 * n_latent + n_endogenous)`` rather than + an outer product of per-axis grids, so per-observation memory is + linear in ``n_halton_points``. The per-observation footprint is + estimated as + + ``n_halton_points * (n_latent + n_endogenous + 1) * 8 * SAFETY_FACTOR``. + + ``n_halton_points_shock`` is retained in the signature for API + compatibility with the earlier per-axis layout but is unused now + that draws are joint. + + Args: + n_obs: Total number of observations. 
+ n_halton_points: Halton grid size (joint dimension count unused here). + n_halton_points_shock: Legacy shock Halton count, ignored. + n_latent: Latent factor count. + n_endogenous: Endogenous (investment) factor count. + target_bytes: Budget per batch. Defaults to `target_batch_bytes()`. + + Return: + A positive integer no larger than ``n_obs``. + """ + budget = target_bytes if target_bytes is not None else target_batch_bytes() + per_obs_bytes = ( + n_halton_points + * (n_latent + n_endogenous + 1) + * _BYTES_PER_FLOAT64 + * _SAFETY_FACTOR + ) + per_obs_bytes = max(per_obs_bytes, 1) + batch = max(1, budget // per_obs_bytes) + return min(batch, n_obs) diff --git a/src/skillmodels/af/estimate.py b/src/skillmodels/af/estimate.py new file mode 100644 index 00000000..e78ebc43 --- /dev/null +++ b/src/skillmodels/af/estimate.py @@ -0,0 +1,444 @@ +"""Main driver for the AF estimation procedure.""" + +import dataclasses +import gc + +import jax +import jax.numpy as jnp +import numpy as np +import optimagic as om +import pandas as pd +from jax import Array + +from skillmodels.af.initial_period import estimate_initial_period +from skillmodels.af.params import get_measurements_per_factor +from skillmodels.af.transition_period import estimate_transition_period +from skillmodels.af.types import ( + AFEstimationOptions, + AFEstimationResult, + AFPeriodResult, + ChainLink, + ConditionalDistribution, + MixtureComponent, +) +from skillmodels.af.validate import validate_af_model +from skillmodels.amn.estimate import estimate_amn +from skillmodels.common.model_spec import ModelSpec +from skillmodels.common.process_model import process_model + + +def estimate_af( + model_spec: ModelSpec, + data: pd.DataFrame, + af_options: AFEstimationOptions | None = None, + start_params: pd.DataFrame | None = None, + fixed_params: pd.DataFrame | None = None, + constraints: list[om.constraints.Constraint] | None = None, +) -> AFEstimationResult: + """Estimate a latent factor model using the Antweiler-Freyberger method. + + Sequential period-by-period MLE with Halton quadrature for numerical + integration, following Antweiler and Freyberger (2025). + + The procedure estimates one period at a time: + - Step 0: Fit initial distribution and measurement params for period 0 + - Step t (t >= 1): Estimate transition and measurement params using the + estimated distribution from previous periods + + Args: + model_spec: Model specification (same as for CHS estimation). + data: Dataset in long format with MultiIndex (id, period). + af_options: AF-specific estimation options. If None, uses defaults. + start_params: Optional starting parameter values. If provided, any + matching index entries override the heuristic defaults. Uses the + same 4-level MultiIndex as CHS params (category, period, name1, + name2). Unmatched entries keep their heuristic values. + fixed_params: Optional DataFrame with a "value" column pinning + specified parameters to fixed values. Bounds are clamped equal + to the value so the optimizer excludes them. Used, e.g., to pin + time-invariant latent factors to identity transitions with zero + shocks (same convention as CHS augmented periods). + constraints: Optional list of optimagic Constraint objects. 
Only + `om.EqualityConstraint` entries that select via + `skillmodels.common.constraints.select_by_loc` are honoured: their + members are propagated forward through the chain — once any + member of an equality group has been estimated, every other + member (including those at not-yet-estimated periods) is + pinned to that value via `fixed_params`. Other constraint + types are ignored (AF's per-period MLE handles model-implied + within-period constraints internally). + + Return: + AFEstimationResult with per-period results and combined parameters. + + """ + jax.config.update("jax_enable_x64", val=True) + + if af_options is None: + af_options = AFEstimationOptions() + + validate_af_model(model_spec) + processed_model = process_model(model_spec) + + # If AMN-based starts are requested, run the full AMN three-stage + # estimator upfront and overlay its parameter estimates onto the + # caller-supplied `start_params` (user values win on overlap). + # After this the per-period MLE proceeds with `initialization_strategy + # = "constant"` internally so the within-period Spearman pre-pass is + # skipped (AMN's values are already in the optimizer's starting + # neighbourhood). + if af_options.initialization_strategy == "amn": + amn_result = estimate_amn(model_spec=model_spec, data=data) + amn_start = amn_result.all_params[["value"]] + if start_params is not None: + user_idx = start_params.index + amn_start = amn_start.drop( + index=amn_start.index.intersection(user_idx), + errors="ignore", + ) + start_params = pd.concat([amn_start, start_params]).sort_index() + else: + start_params = amn_start + af_options = AFEstimationOptions( + n_halton_points=af_options.n_halton_points, + n_halton_points_shock=af_options.n_halton_points_shock, + n_mixture_components=af_options.n_mixture_components, + optimizer_algorithm=af_options.optimizer_algorithm, + optimizer_options=dict(af_options.optimizer_options), + two_stage=af_options.two_stage, + coarse_fraction=af_options.coarse_fraction, + stability_floor=af_options.stability_floor, + n_obs_per_batch=af_options.n_obs_per_batch, + initialization_strategy="constant", + ) + + # Extract data arrays per period + n_periods = processed_model.dimensions.n_periods + factors = processed_model.labels.latent_factors + controls_names = processed_model.labels.controls + observed_factors = processed_model.labels.observed_factors + + # Identify endogenous (investment) factors + endog_info = processed_model.endogenous_factors_info + endogenous_factors = tuple( + f + for f in factors + if f in endog_info.factor_info and endog_info.factor_info[f].is_endogenous + ) + state_factors = tuple(f for f in factors if f not in endogenous_factors) + + period_data = _extract_period_data( + data, + n_periods, + factors, + controls_names, + model_spec, + observed_factors=observed_factors, + ) + + equality_groups = _extract_equality_groups(constraints) + + # Step 0: Initial period + period_0_result, cond_dist = estimate_initial_period( + model_spec=model_spec, + processed_model=processed_model, + measurements=period_data[0]["measurements"], + controls=period_data[0]["controls"], + af_options=af_options, + state_factors=state_factors, + start_params=start_params, + fixed_params=fixed_params, + observed_factors=observed_factors, + observed_factor_values=period_data[0].get("observed_factors"), + ) + + period_results: list[AFPeriodResult] = [period_0_result] + conditional_dists: list[ConditionalDistribution] = [cond_dist] + fixed_params = _propagate_equality_groups( + period_results=period_results, + 
fixed_params=fixed_params, + equality_groups=equality_groups, + ) + + # Steps 1..T-1: Transition periods + for t in range(1, n_periods): + measurements_pt = get_measurements_per_factor(model_spec.factors, period=t) + if not measurements_pt: + break + + prev_period_params = period_results[-1].params + + period_t_result, cond_dist = estimate_transition_period( + period=t, + model_spec=model_spec, + processed_model=processed_model, + measurements=period_data[t]["measurements"], + controls=period_data[t]["controls"], + prev_measurements=period_data[t - 1]["measurements"], + prev_controls=period_data[t - 1]["controls"], + prev_period_params=prev_period_params, + prev_distribution=cond_dist, + af_options=af_options, + endogenous_factors=endogenous_factors, + observed_factors=observed_factors, + observed_factor_data=period_data.get(t - 1, {}).get( + "observed_factors", None + ), + start_params=start_params, + fixed_params=fixed_params, + ) + period_results.append(period_t_result) + conditional_dists.append(cond_dist) + fixed_params = _propagate_equality_groups( + period_results=period_results, + fixed_params=fixed_params, + equality_groups=equality_groups, + ) + + # Combine parameters from all periods + all_params = pd.concat([r.params for r in period_results]) + + # Free the XLA compilation cache + any unreferenced device buffers + # before materialising the result. The per-period likelihoods and + # gradients leave hundreds of MB of compiled executables and stale + # intermediates on the device; without this the GPU→host copy in + # `_to_numpy(...)` has been observed to OOM on a host-side staging + # allocation, even though the arrays themselves are small. + jax.clear_caches() + gc.collect() + + # Drop `samples_per_component` (the multi-GB per-period + # `(n_halton, n_obs, n_state)` importance buffer) from every + # conditional distribution BEFORE materialising anything else. + # Otherwise the next `_to_numpy(c.mean)` call has to fit a staging + # buffer alongside live `samples_per_component` device arrays and + # OOMs. Mutating the list in place and forcing a GC pass releases + # the underlying device buffers immediately; only the small + # summary stats and chain history remain on the GPU when conversion + # starts. + for idx, cd in enumerate(conditional_dists): + conditional_dists[idx] = dataclasses.replace(cd, samples_per_component=()) + del cd + gc.collect() + jax.clear_caches() + + # Materialise every remaining JAX array in the result as a numpy + # array. Downstream consumers (pickling, plotting, posterior_states) + # don't need GPU residency, and leaving the arrays as `jax.Array` + # would force materialisation at pickle time -- which on a busy + # device routinely OOMs inside `__reduce__`. 
+ conditional_dists_compact = tuple( + _to_numpy_conditional_distribution(cd) for cd in conditional_dists + ) + + return AFEstimationResult( + period_results=tuple(period_results), + all_params=all_params, + model_spec=model_spec, + conditional_distributions=conditional_dists_compact, + ) + + +def _to_numpy(value: Array | np.ndarray | None) -> np.ndarray | None: + """Materialise a JAX array as numpy; pass `None` through.""" + if value is None: + return None + return np.asarray(jax.device_get(value)) + + +def _to_numpy_chain_link(link: ChainLink) -> ChainLink: + """Convert every JAX field of a `ChainLink` to numpy.""" + return dataclasses.replace( + link, + transition_params=_to_numpy(link.transition_params), + shock_sds=_to_numpy(link.shock_sds), + shock_factor_indices=_to_numpy(link.shock_factor_indices), + inv_eq_params=_to_numpy(link.inv_eq_params), + inv_sds=_to_numpy(link.inv_sds), + obs_factor_values=_to_numpy(link.obs_factor_values), + ) + + +def _to_numpy_conditional_distribution( + cond_dist: ConditionalDistribution, +) -> ConditionalDistribution: + """Convert all arrays to numpy and drop `samples_per_component`.""" + new_components = tuple( + MixtureComponent( + mean=_to_numpy(c.mean), # ty: ignore[invalid-argument-type] + chol_cov=_to_numpy(c.chol_cov), # ty: ignore[invalid-argument-type] + ) + for c in cond_dist.components + ) + new_chain_links = tuple(_to_numpy_chain_link(cl) for cl in cond_dist.chain_links) + return dataclasses.replace( + cond_dist, + mixture_weights=_to_numpy(cond_dist.mixture_weights), + components=new_components, + samples_per_component=(), + conditional_weights=_to_numpy(cond_dist.conditional_weights), + cond_means=_to_numpy(cond_dist.cond_means), + cond_chols=_to_numpy(cond_dist.cond_chols), + chain_links=new_chain_links, + ) + + +def _extract_period_data( + data: pd.DataFrame, + n_periods: int, + _factors: tuple[str, ...], + controls_names: tuple[str, ...], + model_spec: ModelSpec, + observed_factors: tuple[str, ...] = (), +) -> dict[int, dict[str, Array]]: + """Extract measurement, control, and observed factor arrays per period. + + Return: + Dict mapping period -> {"measurements": Array, "controls": Array, + "observed_factors": Array (if any)}. 
+ + """ + period_data: dict[int, dict[str, Array]] = {} + + idx_names = data.index.names + period_col = str(idx_names[1]) + + for t in range(n_periods): + measurements_pt = get_measurements_per_factor(model_spec.factors, period=t) + if not measurements_pt: + continue + + all_measures: list[str] = [] + seen: set[str] = set() + for measures in measurements_pt.values(): + for m in measures: + if m not in seen: + seen.add(m) + all_measures.append(m) + + period_mask = data.index.get_level_values(period_col) == t + period_df = data.loc[period_mask] + + meas_cols = [c for c in all_measures if c in period_df.columns] + meas_array = jnp.array( + period_df[meas_cols].to_numpy(dtype=np.float64, na_value=np.nan), + ) + + ctrl_arrays = [] + for ctrl in controls_names: + if ctrl == "constant": + ctrl_arrays.append(np.ones(len(period_df))) + elif ctrl in period_df.columns: + ctrl_arrays.append(period_df[ctrl].to_numpy(dtype=np.float64)) + else: + ctrl_arrays.append(np.zeros(len(period_df))) + ctrl_array = jnp.array(np.column_stack(ctrl_arrays)) + + entry: dict[str, Array] = { + "measurements": meas_array, + "controls": ctrl_array, + } + + if observed_factors: + entry["observed_factors"] = _extract_observed_factors( + period_df, observed_factors + ) + + period_data[t] = entry + + return period_data + + +def _extract_observed_factors( + period_df: pd.DataFrame, + observed_factors: tuple[str, ...], +) -> Array: + """Extract observed factor values from a period's DataFrame.""" + obs_arrays = [ + period_df[of].to_numpy(dtype=np.float64) + if of in period_df.columns + else np.zeros(len(period_df)) + for of in observed_factors + ] + return jnp.array(np.column_stack(obs_arrays)) + + +def _extract_equality_groups( + constraints: list[om.constraints.Constraint] | None, +) -> list[pd.MultiIndex]: + """Pull cross-period equality groups out of an optimagic constraints list. + + Honours `om.EqualityConstraint` instances whose selector is built via + `functools.partial(skillmodels.common.constraints.select_by_loc, loc=...)`. + The `loc` keyword carries the `pd.MultiIndex` of params that must be + equal — those are the equality groups returned here. + """ + if not constraints: + return [] + groups: list[pd.MultiIndex] = [] + for c in constraints: + if not isinstance(c, om.EqualityConstraint): + continue + selector = c.selector + keywords = getattr(selector, "keywords", None) + if not keywords or "loc" not in keywords: + continue + loc = keywords["loc"] + if isinstance(loc, pd.MultiIndex) and len(loc) > 1: + groups.append(loc) + return groups + + +def _propagate_equality_groups( + *, + period_results: list[AFPeriodResult], + fixed_params: pd.DataFrame | None, + equality_groups: list[pd.MultiIndex], +) -> pd.DataFrame | None: + """Propagate just-estimated values to all members of cross-period equality groups. + + For each equality group: if any member is in the union of + `period_results[*].params`, pin every other member of the group + (that is not already pinned by `fixed_params`) to that member's + estimated value via additions to `fixed_params`. Subsequent + periods' MLEs see those entries as fixed, enforcing equality + across the chain. 
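+
+    Example (illustrative; the index tuples are placeholders, not names
+    from a real model)::
+
+        group = pd.MultiIndex.from_tuples(
+            [("loadings", 0, "m1", "fac1"), ("loadings", 1, "m1", "fac1")],
+            names=["category", "period", "name1", "name2"],
+        )
+        # Once period 0 has been estimated, the period-1 entry is appended
+        # to ``fixed_params`` at period 0's estimated value, so the
+        # period-1 MLE treats it as pinned.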
+ """ + if not equality_groups: + return fixed_params + + estimated = pd.concat([r.params for r in period_results]) + if "value" in estimated.columns: + estimated_series = estimated["value"] + else: + estimated_series = estimated.iloc[:, 0] + + if fixed_params is None or len(fixed_params) == 0: + index_names = ["category", "period", "name1", "name2"] + running = pd.DataFrame( + {"value": []}, + index=pd.MultiIndex.from_tuples([], names=index_names), + ) + else: + running = fixed_params.copy() + + new_locs: list[tuple] = [] + new_values: list[float] = [] + for group in equality_groups: + in_estimated = [loc for loc in group if loc in estimated_series.index] + if not in_estimated: + continue + anchor_value = float(estimated_series.loc[in_estimated[0]]) + for loc in group: + if loc in running.index: + continue + new_locs.append(loc) + new_values.append(anchor_value) + if not new_locs: + return running + + addition = pd.DataFrame( + {"value": new_values}, + index=pd.MultiIndex.from_tuples(new_locs, names=running.index.names), + ) + return pd.concat([running, addition]) diff --git a/src/skillmodels/af/halton.py b/src/skillmodels/af/halton.py new file mode 100644 index 00000000..bbd77dd4 --- /dev/null +++ b/src/skillmodels/af/halton.py @@ -0,0 +1,92 @@ +"""Halton quasi-random sequence generation for numerical quadrature.""" + +import jax.numpy as jnp +import numpy as np +from jax import Array +from scipy.stats import qmc + + +def create_halton_nodes_and_weights( + n_points: int, + n_dim: int, + *, + seed: int = 0, +) -> tuple[Array, Array]: + """Create Halton quadrature nodes transformed to standard normal. + + Generate a low-discrepancy Halton sequence in [0, 1]^d, then transform + to standard normal quantiles via the inverse CDF. Weights are uniform + (1/n_points) since the Halton sequence provides quasi-uniform coverage. + + Args: + n_points: Number of quadrature points. + n_dim: Dimensionality of the sequence. + seed: Seed for scrambled Halton sequence (for reproducibility). + + Return: + Tuple of (nodes, weights) where: + - nodes: shape (n_points, n_dim), standard normal quantiles + - weights: shape (n_points,), uniform weights summing to 1 + + """ + sampler = qmc.Halton(d=n_dim, scramble=True, seed=seed) + # Generate uniform [0, 1] samples, skip first point (often degenerate) + uniform_samples = sampler.random(n=n_points + 1)[1:] + + # Clip to avoid infinite values at 0 and 1 + uniform_samples = np.clip(uniform_samples, 1e-10, 1 - 1e-10) + + # Transform to standard normal via inverse CDF + from scipy.stats import norm # noqa: PLC0415 + + normal_nodes = norm.ppf(uniform_samples) + + nodes = jnp.array(normal_nodes, dtype=jnp.float64) + weights = jnp.ones(n_points, dtype=jnp.float64) / n_points + + return nodes, weights + + +def transform_nodes_to_conditional( + standard_nodes: Array, + mean: Array, + chol_cov: Array, +) -> Array: + """Transform standard normal nodes to a conditional distribution. + + Apply the affine transformation: x = mean + chol_cov @ z where z are + standard normal nodes. + + Args: + standard_nodes: Shape (n_points, n_dim), standard normal quantiles. + mean: Shape (n_dim,), mean of the target distribution. + chol_cov: Shape (n_dim, n_dim), lower Cholesky of the target covariance. + + Return: + Transformed nodes, shape (n_points, n_dim). + + """ + return mean + standard_nodes @ chol_cov.T + + +def create_shock_nodes_and_weights( + n_points: int, + n_shocks: int, + *, + seed: int = 42, +) -> tuple[Array, Array]: + """Create quadrature nodes for production shocks. 
+ + Separate Halton sequence for the shock integration dimension, using + a different seed to avoid correlation with the state nodes. + + Args: + n_points: Number of quadrature points per shock dimension. + n_shocks: Number of independent shock dimensions. + seed: Seed for the Halton sequence. + + Return: + Tuple of (nodes, weights) with nodes shape (n_points, n_shocks). + + """ + return create_halton_nodes_and_weights(n_points, n_shocks, seed=seed) diff --git a/src/skillmodels/af/inference.py b/src/skillmodels/af/inference.py new file mode 100644 index 00000000..2f644b27 --- /dev/null +++ b/src/skillmodels/af/inference.py @@ -0,0 +1,1059 @@ +"""Score-bootstrap standard errors for the AF estimator. + +Implements the score bootstrap procedure prescribed in Antweiler & +Freyberger (2025) §4.2 (inspired by Armstrong, Bertanha & Hong 2014). +The AF estimator is a sequential multi-step MLE; its asymptotic variance +includes terms that propagate the estimation uncertainty of earlier +steps, which makes the analytical sandwich + + V = A^{-1} Omega A^{-T} / n + +incorrect when computed without those cross-step terms. AF §4.2 puts +this directly: + + "this asymptotic variance is incorrect because it ignores the + estimation errors of tau_{t-1}, ..., tau_1, which is the second + term in the expansion above. To account for those, we would have + to calculate ... which is very difficult because the likelihood is + (partly) simulated and not available in closed form. To avoid + these calculations, we use a score bootstrap procedure inspired by + Armstrong, Bertanha, and Hong (2014)." + +This module exposes a single inference entry point, +:func:`compute_af_standard_errors`, which implements that score +bootstrap. It avoids re-estimating the model B times: per-observation +scores are computed once at the optimum, then for each of ``n_boot`` +replicates we resample caseids with replacement, average their scores, +and take a one-step Newton update from the optimum. The empirical +standard deviation of the resulting parameter draws is the bootstrap +standard error. + +""" + +from collections.abc import Callable, Mapping +from dataclasses import dataclass, field +from types import MappingProxyType +from typing import Any, NamedTuple + +import jax +import jax.numpy as jnp +import numpy as np +import pandas as pd +from jax import Array + +from skillmodels.af.batching import auto_n_obs_per_batch +from skillmodels.af.estimate import _extract_period_data +from skillmodels.af.halton import create_halton_nodes_and_weights +from skillmodels.af.initial_period import ( + _build_loading_mask, + _get_ordered_measures, +) +from skillmodels.af.likelihood import ( + _parse_initial_params, + _parse_transition_params, + af_per_obs_loglike_initial, + af_per_obs_loglike_transition, +) +from skillmodels.af.params import ( + build_optimagic_inputs, + get_measurements_per_factor, +) +from skillmodels.af.transition_period import ( + _extract_prev_measurement_params, + _get_raw_transition_functions, + _prepare_transition_inputs, +) +from skillmodels.af.types import ( + AFEstimationOptions, + AFEstimationResult, + ChainLink, + ConditionalDistribution, +) +from skillmodels.common.constraints import FixedConstraintWithValue +from skillmodels.common.model_spec import ModelSpec +from skillmodels.common.process_model import process_model +from skillmodels.common.types import ProcessedModel + + +@dataclass(frozen=True) +class AFInferenceResult: + """Score-bootstrap inference result for the AF estimator. 
+ + See :func:`compute_af_standard_errors` for the procedure (AF 2025 + §4.2 / Armstrong-Bertanha-Hong 2014). + """ + + standard_errors: pd.Series + """Bootstrap standard errors indexed by ``all_params.index``. + + SEs are the empirical standard deviation across bootstrap + replicates of each parameter's one-step Newton shift from the + point estimate. Fixed-parameter and constrained-direction entries + are reported as zero (or NaN where the period's information matrix + is singular on that direction). + """ + + vcov: pd.DataFrame + """Variance-covariance matrix, rows and columns share + ``all_params.index``. Computed from + ``replicate_params.cov(ddof=1)`` so SEs and vcov are internally + consistent. + """ + + replicate_params: pd.DataFrame + """``(n_boot, n_params)`` DataFrame of bootstrap parameter draws. + + Each row is ``theta_hat + delta_b`` where ``delta_b = -A^{-1} * + bar_g_b``, ``bar_g_b`` is the mean per-cluster score in bootstrap + replicate ``b``, and ``A`` is the period's information matrix at + the optimum. Columns share ``all_params.index``; pinned-parameter + columns are constant at the point estimate. + """ + + n_clusters: int + """Number of caseids resampled per replicate (= number of unique + caseids in the data). + """ + + n_boot: int + """Number of bootstrap replicates drawn.""" + + +def compute_af_standard_errors( + result: AFEstimationResult, + data: pd.DataFrame, + af_options: AFEstimationOptions | None = None, + *, + n_boot: int = 10_000, + seed: int = 0, +) -> AFInferenceResult: + """Score-resampling cluster bootstrap for the AF estimator. + + Implements Antweiler & Freyberger (2025) §4.2 (Armstrong-Bertanha-Hong + score bootstrap). Per-observation scores are computed once at the + optimum; for each of ``n_boot`` replicates we resample caseids with + replacement, average the resampled scores, and apply a one-step + Newton update from the optimum: + + theta_b = theta_hat - A_t^{-1} * bar_g_b + + where ``A_t`` is the period-``t`` information matrix and + ``bar_g_b`` is the bootstrap-averaged per-obs score restricted to + period-``t`` free parameters. Periods are resampled independently + — joint resampling would couple periods through the + block-diagonal information matrix the same way separate draws do, + so we report own-block bootstrap SEs. + + The analytical Newey-McFadden sandwich is **not** provided: as AF + §4.2 notes, the closed-form variance ignores estimation error in + the previous-period nuisance parameters tau_{t-1}, ..., tau_1, so + it is incorrect for any t >= 1. The score bootstrap captures this + propagation. + + For ``n_boot=10000`` and ``n_caseids=1500`` this typically takes + seconds rather than days (no re-estimation per replicate). + + Args: + result: Output of ``estimate_af``. + data: The dataset used for estimation; the caseid level of its + MultiIndex defines the bootstrap clusters. + af_options: Options used at estimation time. + n_boot: Number of bootstrap replicates. + seed: Seed for the resampling RNG. + + Return: + :class:`AFInferenceResult` with bootstrap SEs, vcov computed + from the replicate distribution, and the full + replicate-by-parameter DataFrame. 
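+
+    Example (illustrative; ``result`` is the output of ``estimate_af`` and
+    ``data`` is the same DataFrame that was passed to it)::
+
+        inference = compute_af_standard_errors(result, data, n_boot=2_000)
+        ses = inference.standard_errors  # pd.Series, indexed like all_params
+        vcov = inference.vcov  # pd.DataFrame, same index on both axes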
+ + """ + if af_options is None: + af_options = AFEstimationOptions() + + jax.config.update("jax_enable_x64", val=True) + + model_spec = result.model_spec + processed_model = process_model(model_spec) + + n_periods = processed_model.dimensions.n_periods + latent_factors = processed_model.labels.latent_factors + controls_names = processed_model.labels.controls + observed_factors = processed_model.labels.observed_factors + + endog_info = processed_model.endogenous_factors_info + endogenous_factors = tuple( + f + for f in latent_factors + if f in endog_info.factor_info and endog_info.factor_info[f].is_endogenous + ) + + period_data = _extract_period_data( + data, + n_periods, + latent_factors, + controls_names, + model_spec, + observed_factors=observed_factors, + ) + + metas = _build_period_metas( + result=result, + period_data=period_data, + model_spec=model_spec, + processed_model=processed_model, + af_options=af_options, + observed_factors=observed_factors, + endogenous_factors=endogenous_factors, + ) + + # Precompute per-period score and information matrices at the + # optimum. The bootstrap then resamples score rows (caseids) and + # applies a one-step Newton update; no re-estimation per replicate. + period_score_info = _compute_block_diagonal_sandwich(result, metas) + + rng = np.random.default_rng(seed) + all_params = result.all_params + replicate_values = np.tile(all_params["value"].to_numpy()[None, :], (n_boot, 1)) + + pos_lookup = {tuple(loc): i for i, loc in enumerate(all_params.index)} + + n_clusters = int(metas[0].loglike_kwargs["measurements"].shape[0]) + + for period_res in period_score_info: + score = np.array(period_res.score_matrix) # (n, n_free_own) + info = np.array(period_res.information_matrix) + # Use pinv for the same null-space-tolerant reasons as + # ``_block_diagonal_sandwich_single``. + a_inv = np.linalg.pinv(info) + + idx = rng.integers(0, n_clusters, size=(n_boot, n_clusters)) + mean_score = score[idx].mean(axis=1) # (n_boot, n_free_own) + delta = -mean_score @ a_inv.T # (n_boot, n_free_own); one-step shift + + global_cols = np.array( + [pos_lookup[loc] for loc in period_res.free_param_locs], + dtype=np.int64, + ) + replicate_values[:, global_cols] += delta + + replicate_params = pd.DataFrame(replicate_values, columns=all_params.index) + standard_errors = pd.Series( + replicate_params.std(axis=0, ddof=1).to_numpy(), + index=all_params.index, + name="standard_error", + ) + # Variance-covariance from the replicate distribution. Pinned-parameter + # rows/columns are zero (constant column → zero variance/covariance). + vcov_values = replicate_params.cov(ddof=1).to_numpy() + vcov = pd.DataFrame( + vcov_values, + index=all_params.index, + columns=all_params.index, + ) + + return AFInferenceResult( + standard_errors=standard_errors, + vcov=vcov, + replicate_params=replicate_params, + n_clusters=n_clusters, + n_boot=n_boot, + ) + + +# --------------------------------------------------------------------------- +# Period metadata: all the static info we need for both sandwich modes. +# --------------------------------------------------------------------------- + + +@dataclass(frozen=True) +class _PeriodMeta: + """Precomputed static metadata for one period's likelihood. + + Pure-Python dataclass; JAX arrays live in ``loglike_kwargs`` and + ``propagation``. 
+ """ + + period: int + is_initial: bool + slice_start: int + slice_stop: int + params_df: pd.DataFrame + loglike_kwargs: MappingProxyType[str, Any] + """Keyword arguments forwarded to ``af_per_obs_loglike_initial`` (if + ``is_initial``) or ``af_per_obs_loglike_transition`` otherwise. + """ + parse_kwargs: MappingProxyType[str, Any] + """Keyword arguments forwarded to ``_parse_initial_params`` or + ``_parse_transition_params`` respectively. Used by the Phase 2 chain. + """ + n_components: int + n_factors_joint: int + """Joint factor count in the initial mixture (state_latent + observed). + Only meaningful for the initial period; zero otherwise. + """ + n_state: int + """State-factor count (``n_state_latent`` in the initial period; + ``n_state_factors`` in transition periods). + """ + n_endog: int + n_shock: int + n_observed_factors: int + state_factor_indices_in_joint: tuple[int, ...] + """Integer positions within the joint factor vector at which state + factors live (the complement is observed factors). Used to marginalise + the joint cond-dist to its state-factor sub-block. + """ + target_idx_in_joint: tuple[int, ...] = () + """Initial-period only: positions of the *target* state factors (the + ones whose marginal we want carry-over samples for) within + `joint_factors`. Differs from ``state_factor_indices_in_joint`` when + the joint includes an endogenous factor with ``has_initial_distribution=True`` + that should be excluded from the carry-over. + """ + obs_idx_in_joint: tuple[int, ...] = () + """Initial-period only: positions of observed factors within + `joint_factors`. Empty for transition-period metas. + """ + propagation: MappingProxyType[str, Any] = field( + default_factory=lambda: MappingProxyType({}) + ) + """Extra JAX-pure bits for propagation of the conditional distribution + through this period's transition. Only populated for transition + periods. Keys: ``joint_nodes``, ``combined_transition``, + ``obs_factor_values``, ``shock_factor_indices``. 
+ """ + + +def _build_period_metas( + *, + result: AFEstimationResult, + period_data: dict[int, dict[str, Array]], + model_spec: ModelSpec, + processed_model: ProcessedModel, + af_options: AFEstimationOptions, + observed_factors: tuple[str, ...], + endogenous_factors: tuple[str, ...], +) -> tuple[_PeriodMeta, ...]: + """Build per-period metadata objects for both inference modes.""" + metas: list[_PeriodMeta] = [] + offset = 0 + for period_result in result.period_results: + t = period_result.period + params_df = period_result.params + length = len(params_df) + + if t == 0: + meta = _build_initial_period_meta( + period_result_params=params_df, + slice_start=offset, + slice_stop=offset + length, + model_spec=model_spec, + processed_model=processed_model, + af_options=af_options, + data_at_period=period_data[0], + observed_factors=observed_factors, + endogenous_factors=endogenous_factors, + ) + else: + prev_period_params = result.period_results[t - 1].params + prev_cond_dist = result.conditional_distributions[t - 1] + meta = _build_transition_period_meta( + period=t, + period_result_params=params_df, + slice_start=offset, + slice_stop=offset + length, + prev_period_params=prev_period_params, + prev_cond_dist=prev_cond_dist, + model_spec=model_spec, + processed_model=processed_model, + af_options=af_options, + data_at_period=period_data[t], + prev_data_at_period=period_data[t - 1], + endogenous_factors=endogenous_factors, + observed_factors=observed_factors, + ) + metas.append(meta) + offset += length + return tuple(metas) + + +def _build_initial_period_meta( + *, + period_result_params: pd.DataFrame, + slice_start: int, + slice_stop: int, + model_spec: ModelSpec, + processed_model: ProcessedModel, + af_options: AFEstimationOptions, + data_at_period: Mapping[str, Array], + observed_factors: tuple[str, ...], + endogenous_factors: tuple[str, ...] = (), +) -> _PeriodMeta: + factors = processed_model.labels.latent_factors + controls_names = processed_model.labels.controls + n_components = af_options.n_mixture_components + + reconstructed_factors = tuple( + f for f in factors if not model_spec.factors[f].has_initial_distribution + ) + state_latent_factors = tuple(f for f in factors if f not in reconstructed_factors) + n_state_latent = len(state_latent_factors) + n_obs_factors = len(observed_factors) + n_joint = n_state_latent + n_obs_factors + state_factor_indices_in_joint = tuple(range(n_state_latent)) + + # Target factors for the carry-over sample = state_latent minus + # endogenous (matches what `estimate_initial_period` does in the + # estimation path). 
+ joint_factors = state_latent_factors + observed_factors + target_factors = tuple( + f for f in state_latent_factors if f not in endogenous_factors + ) + target_idx_in_joint = tuple(joint_factors.index(f) for f in target_factors) + obs_idx_in_joint = tuple(joint_factors.index(f) for f in observed_factors) + n_state_target = len(target_factors) + + measurements_p0 = get_measurements_per_factor(model_spec.factors, period=0) + measurements_p0_filtered = { + f: m for f, m in measurements_p0.items() if f in state_latent_factors + } + all_measures_full = _get_ordered_measures(measurements_p0) + all_measures = _get_ordered_measures(measurements_p0_filtered) + + measurements = data_at_period["measurements"] + if len(all_measures) != len(all_measures_full): + col_indices = jnp.array( + [all_measures_full.index(m) for m in all_measures], dtype=jnp.int32 + ) + measurements = measurements[:, col_indices] + + loading_mask = _build_loading_mask( + all_measures, state_latent_factors, measurements_p0_filtered + ) + + nodes, weights = create_halton_nodes_and_weights( + af_options.n_halton_points, n_state_latent + ) + + obs_values = data_at_period.get( + "observed_factors", + jnp.zeros((int(measurements.shape[0]), 0)), + ) + + n_obs_per_batch = af_options.n_obs_per_batch + if n_obs_per_batch is None: + n_obs_per_batch = auto_n_obs_per_batch( + n_obs=int(measurements.shape[0]), + n_halton_points=af_options.n_halton_points, + n_halton_points_shock=af_options.n_halton_points_shock, + n_latent=n_joint, + n_endogenous=0, + ) + + loglike_kwargs = { + "n_factors": n_joint, + "n_latent_factors": n_state_latent, + "n_mixture_components": n_components, + "n_measures": len(all_measures), + "n_controls": len(controls_names), + "measurements": measurements, + "controls": data_at_period["controls"], + "observed_factor_values": obs_values, + "loading_mask": jnp.array(loading_mask), + "nodes": nodes, + "weights": weights, + "stability_floor": af_options.stability_floor, + "n_obs_per_batch": n_obs_per_batch, + } + + parse_kwargs = { + "n_factors": n_joint, + "n_mixture_components": n_components, + "n_measures": len(all_measures), + "n_controls": len(controls_names), + } + + return _PeriodMeta( + period=0, + is_initial=True, + slice_start=slice_start, + slice_stop=slice_stop, + params_df=period_result_params, + loglike_kwargs=MappingProxyType(loglike_kwargs), + parse_kwargs=MappingProxyType(parse_kwargs), + n_components=n_components, + n_factors_joint=n_joint, + n_state=n_state_target, + n_endog=0, + n_shock=0, + n_observed_factors=n_obs_factors, + state_factor_indices_in_joint=state_factor_indices_in_joint, + target_idx_in_joint=target_idx_in_joint, + obs_idx_in_joint=obs_idx_in_joint, + propagation=MappingProxyType({}), + ) + + +def _build_transition_period_meta( + *, + period: int, + period_result_params: pd.DataFrame, + slice_start: int, + slice_stop: int, + prev_period_params: pd.DataFrame, + prev_cond_dist: ConditionalDistribution, + model_spec: ModelSpec, + processed_model: ProcessedModel, + af_options: AFEstimationOptions, + data_at_period: Mapping[str, Array], + prev_data_at_period: Mapping[str, Array], + endogenous_factors: tuple[str, ...], + observed_factors: tuple[str, ...], +) -> _PeriodMeta: + factors = processed_model.labels.latent_factors + controls_names = processed_model.labels.controls + transition_info = processed_model.transition_info + + state_factors = tuple(f for f in factors if f not in endogenous_factors) + n_state = len(state_factors) + n_endog = len(endogenous_factors) + shock_factors = tuple( + 
f for f in state_factors if model_spec.factors[f].has_production_shock + ) + n_shock = len(shock_factors) + shock_factor_indices = jnp.array( + [state_factors.index(f) for f in shock_factors], dtype=jnp.int32 + ) + state_factor_indices_in_latent = jnp.array( + [factors.index(f) for f in state_factors], dtype=jnp.int32 + ) + + measurements_pt = get_measurements_per_factor(model_spec.factors, period=period) + all_measures = _get_ordered_measures(measurements_pt) + loading_mask = _build_loading_mask(all_measures, factors, measurements_pt) + + # Match transition_period.py: a single joint Halton at every step + # covers (z_state for theta_0) + (n_chain) prior chain shocks + # (z_inv, z_P) + current step's (z_inv, z_P). + n_chain = period - 1 + z_block = n_shock + n_endog + joint_dim = n_state + n_chain * z_block + z_block + joint_nodes, joint_weights = create_halton_nodes_and_weights( + af_options.n_halton_points, joint_dim, seed=period + ) + + measurements = data_at_period["measurements"] + controls = data_at_period["controls"] + prev_measurements = prev_data_at_period["measurements"] + prev_controls = prev_data_at_period["controls"] + + prev_dist_arrays, total_n_transition_params = _prepare_transition_inputs( + prev_cond_dist, + transition_info, + state_factors, + int(measurements.shape[0]), + ) + + raw_funcs = _get_raw_transition_functions( + model_spec, + state_factors, + all_factors=processed_model.labels.all_factors, + param_names=transition_info.param_names, + ) + param_counts = tuple(len(transition_info.param_names[f]) for f in state_factors) + + def combined_transition(full_states: Array, params: Array) -> Array: + out = jnp.zeros(n_state) + p_idx = 0 + for i in range(n_state): + n_p = param_counts[i] + factor_params = params[p_idx : p_idx + n_p] + out = out.at[i].set(raw_funcs[i](full_states, factor_params)) # noqa: PD008 + p_idx += n_p + return out + + n_inv_eq_params_per = 1 + n_state + len(observed_factors) if n_endog > 0 else 0 + total_n_inv_params = n_endog * n_inv_eq_params_per + + obs_factor_values = prev_data_at_period.get( + "observed_factors", + jnp.zeros((int(measurements.shape[0]), len(observed_factors))), + ) + + chain_links = prev_cond_dist.chain_links + if len(chain_links) == 0: + obs_factor_values_chain = jnp.zeros( + (int(measurements.shape[0]), 0, len(observed_factors)) + ) + else: + obs_factor_values_chain = jnp.stack( + [link.obs_factor_values for link in chain_links], axis=1 + ) + + prev_meas_info = _extract_prev_measurement_params( + prev_period_params, + model_spec, + factors, + period - 1, + ) + + n_obs_per_batch = af_options.n_obs_per_batch + if n_obs_per_batch is None: + n_obs_per_batch = auto_n_obs_per_batch( + n_obs=int(measurements.shape[0]), + n_halton_points=af_options.n_halton_points, + n_halton_points_shock=af_options.n_halton_points_shock, + n_latent=n_state, + n_endogenous=n_endog, + ) + + loglike_kwargs = { + "n_state_factors": n_state, + "n_endogenous_factors": n_endog, + "n_shock_factors": n_shock, + "shock_factor_indices": shock_factor_indices, + "state_factor_indices_in_latent": state_factor_indices_in_latent, + "n_measures": len(all_measures), + "n_controls": len(controls_names), + "measurements": measurements, + "controls": controls, + "loading_mask": jnp.array(loading_mask), + "prev_measurements": prev_measurements, + "prev_controls": prev_controls, + "prev_loading_mask": prev_meas_info["loading_mask"], + "prev_control_params": prev_meas_info["control_params"], + "prev_loadings_flat": prev_meas_info["loadings_flat"], + "prev_meas_sds": 
prev_meas_info["meas_sds"], + "prev_distribution": prev_dist_arrays, + "chain_links": chain_links, + "obs_factor_values_chain": obs_factor_values_chain, + "joint_nodes": joint_nodes, + "joint_weights": joint_weights, + "transition_func": combined_transition, + "total_n_transition_params": total_n_transition_params, + "total_n_inv_params": total_n_inv_params, + "n_inv_eq_params_per": n_inv_eq_params_per, + "observed_factor_values": obs_factor_values, + "stability_floor": af_options.stability_floor, + "n_obs_per_batch": n_obs_per_batch, + } + + parse_kwargs = { + "n_state_factors": n_state, + "n_endogenous_factors": n_endog, + "n_measures": len(all_measures), + "n_controls": len(controls_names), + "total_n_transition_params": total_n_transition_params, + "total_n_inv_params": total_n_inv_params, + "n_inv_eq_params_per": n_inv_eq_params_per, + "n_shock_factors": n_shock, + } + + # For propagating the cond-dist forward to the next period: marginal + # state grid (same convention as ``_update_conditional_distribution``). + propagation_nodes, propagation_weights = create_halton_nodes_and_weights( + af_options.n_halton_points, n_state + ) + + propagation = { + "state_nodes": propagation_nodes, + "state_weights": propagation_weights, + "combined_transition": combined_transition, + "obs_factor_values": obs_factor_values, + "shock_factor_indices": shock_factor_indices, + } + + return _PeriodMeta( + period=period, + is_initial=False, + slice_start=slice_start, + slice_stop=slice_stop, + params_df=period_result_params, + loglike_kwargs=MappingProxyType(loglike_kwargs), + parse_kwargs=MappingProxyType(parse_kwargs), + n_components=len(prev_cond_dist.components), + n_factors_joint=0, + n_state=n_state, + n_endog=n_endog, + n_shock=n_shock, + n_observed_factors=len(observed_factors), + state_factor_indices_in_joint=tuple(range(n_state)), + propagation=MappingProxyType(propagation), + ) + + +# --------------------------------------------------------------------------- +# Free-parameter bookkeeping. +# --------------------------------------------------------------------------- + + +def _free_positions_for_period( + params_df: pd.DataFrame, +) -> tuple[list[int], list[tuple[Any, ...]]]: + """Return positions and locs of free (unpinned, non-simplex) params.""" + _, fixed_constraints = build_optimagic_inputs(params_df, None) + fixed_locs: set[Any] = set() + for constraint in fixed_constraints: + if isinstance(constraint, FixedConstraintWithValue): + loc = constraint.loc + fixed_locs.add(tuple(loc) if isinstance(loc, tuple) else loc) + + all_locs = list(params_df.index) + positions: list[int] = [] + locs: list[tuple[Any, ...]] = [] + for i, loc in enumerate(all_locs): + loc_t = tuple(loc) + if loc_t in fixed_locs or loc[0] == "mixture_weights": + continue + positions.append(i) + locs.append(loc_t) + return positions, locs + + +# --------------------------------------------------------------------------- +# Block-diagonal sandwich (Phase 1 behaviour). +# --------------------------------------------------------------------------- + + +class _PeriodScoreInfo(NamedTuple): + """Per-period score and information matrices at the optimum. + + Internal carrier used by :func:`compute_af_standard_errors` to feed + the score bootstrap. Not part of the public API. + """ + + period: int + free_param_locs: tuple[tuple[Any, ...], ...] 
+ score_matrix: Array + information_matrix: Array + + +def _compute_block_diagonal_sandwich( + _result: AFEstimationResult, + metas: tuple[_PeriodMeta, ...], +) -> list[_PeriodScoreInfo]: + """Compute per-period score and information matrices for the bootstrap.""" + results: list[_PeriodScoreInfo] = [] + for meta in metas: + per_obs_fn = ( + af_per_obs_loglike_initial + if meta.is_initial + else af_per_obs_loglike_transition + ) + info = _block_diagonal_sandwich_single( + meta=meta, + per_obs_loglike_fn=per_obs_fn, + ) + results.append(info) + return results + + +def _block_diagonal_sandwich_single( + *, + meta: _PeriodMeta, + per_obs_loglike_fn: Callable[..., Array], +) -> _PeriodScoreInfo: + """Compute the per-period score matrix and information matrix at theta_hat. + + These feed the score bootstrap. The information matrix is the + Hessian of the scalar negative-mean log-likelihood; the score + matrix has one row per caseid and one column per free parameter. + """ + positions, locs = _free_positions_for_period(meta.params_df) + free_positions_array = jnp.array(positions, dtype=jnp.int32) + flat_values = jnp.array(meta.params_df["value"].to_numpy()) + kwargs = dict(meta.loglike_kwargs) + + def per_obs_loglike_full(flat_params: Array) -> Array: + return per_obs_loglike_fn(flat_params, **kwargs) + + def neg_mean_loglike_full(flat_params: Array) -> Array: + return -jnp.mean(per_obs_loglike_full(flat_params)) + + jac_full = jax.jacfwd(per_obs_loglike_full)(flat_values) + hess_full = jax.hessian(neg_mean_loglike_full)(flat_values) + + score_matrix = jac_full[:, free_positions_array] + information_matrix = hess_full[free_positions_array][:, free_positions_array] + + return _PeriodScoreInfo( + period=meta.period, + free_param_locs=tuple(locs), + score_matrix=score_matrix, + information_matrix=information_matrix, + ) + + +# --------------------------------------------------------------------------- +# Full cross-period sandwich (Phase 2). +# +# Reconstruct ``prev_distribution`` and ``prev_meas_info`` as JAX-pure +# functions of a single concatenated ``flat_super`` parameter vector, so +# ``jax.jacfwd`` captures the full chain of dependencies. +# --------------------------------------------------------------------------- + + +def _build_initial_state_cond_dist_jax( + flat_params_0: Array, + meta: _PeriodMeta, +) -> tuple[Array, Array, Array, Array]: + """JAX-pure analytical reconstruction of the period-0 conditional payload. + + Mirrors ``initial_period._extract_conditional_distribution``: parse + initial-period params, compute per-component / per-obs Schur-conditional + means and per-component Cholesky factors. Returns the inputs the + transition likelihood needs to rebuild θ_0 from a joint Halton inside + its integrand (no chained-sample materialisation here). + + Return: + Tuple of (cond_means, cond_chols, log_unnorms, mixture_weights): + * cond_means: (n_components, n_obs, n_state) + * cond_chols: (n_components, n_state, n_state) + * log_unnorms: (n_components, n_obs); softmaxes to per-obs Bayes + posterior mixture weights when observed factors are present. + * mixture_weights: (n_components,) prior mixture weights. 
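+
+    For reference, the per-component conditioning is the standard Gaussian
+    formula (a plain restatement of what the code below computes, with
+    ``inv`` written out instead of the Cholesky solves)::
+
+        cond_mean_i = mu_t + cov_ty @ inv(cov_yy) @ (y_i - mu_y)
+        cond_cov    = cov_tt - cov_ty @ inv(cov_yy) @ cov_ty.T + 1e-10 * I
+        cond_chol   = cholesky(cond_cov)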
+ """ + parsed = _parse_initial_params( + flat_params_0, + meta.parse_kwargs["n_factors"], + meta.parse_kwargs["n_mixture_components"], + meta.parse_kwargs["n_measures"], + meta.parse_kwargs["n_controls"], + ) + joint_means = parsed["mixture_means"] # (K, n_joint) + joint_chols = parsed["mixture_chol_covs"] # (K, n_joint, n_joint) + mixture_weights = parsed["mixture_weights"] + + obs_values = meta.loglike_kwargs["observed_factor_values"] + n_obs = int(obs_values.shape[0]) + n_obs_factors = meta.n_observed_factors + n_state = meta.n_state + target_idx = jnp.asarray(meta.target_idx_in_joint, dtype=jnp.int32) + + if n_obs_factors == 0: + + def _per_component( + joint_mean: Array, joint_chol: Array + ) -> tuple[Array, Array, Array]: + joint_cov = joint_chol @ joint_chol.T + mu_t = joint_mean[target_idx] + cov_tt = joint_cov[target_idx[:, None], target_idx[None, :]] + sub_chol = jnp.linalg.cholesky(cov_tt + 1e-10 * jnp.eye(n_state)) + cond_mean = jnp.broadcast_to(mu_t[None, :], (n_obs, n_state)) + log_unnorm = jnp.zeros(n_obs) + return cond_mean, sub_chol, log_unnorm + + cond_means, cond_chols, log_unnorms = jax.vmap(_per_component)( + joint_means, joint_chols + ) + log_unnorms = log_unnorms + jnp.log(mixture_weights + 1e-300)[:, None] + else: + obs_idx = jnp.asarray(meta.obs_idx_in_joint, dtype=jnp.int32) + + def _per_component( + joint_mean: Array, joint_chol: Array + ) -> tuple[Array, Array, Array]: + joint_cov = joint_chol @ joint_chol.T + mu_t = joint_mean[target_idx] + mu_y = joint_mean[obs_idx] + cov_tt = joint_cov[target_idx[:, None], target_idx[None, :]] + cov_ty = joint_cov[target_idx[:, None], obs_idx[None, :]] + cov_yy = joint_cov[obs_idx[:, None], obs_idx[None, :]] + chol_yy = jnp.linalg.cholesky(cov_yy) + solve_tt = jax.scipy.linalg.cho_solve((chol_yy, True), cov_ty.T) + cond_cov = cov_tt - cov_ty @ solve_tt + 1e-10 * jnp.eye(n_state) + cond_chol = jnp.linalg.cholesky(cond_cov) + + def _per_obs(y_i: Array) -> tuple[Array, Array]: + alpha = jax.scipy.linalg.cho_solve((chol_yy, True), y_i - mu_y) + cond_mean = mu_t + cov_ty @ alpha + k = y_i.shape[0] + sol = jax.scipy.linalg.solve_triangular(chol_yy, y_i - mu_y, lower=True) + log_marg = ( + -0.5 * k * jnp.log(2 * jnp.pi) + - jnp.sum(jnp.log(jnp.diag(chol_yy))) + - 0.5 * jnp.dot(sol, sol) + ) + return cond_mean, log_marg + + cond_means_per_obs, log_margs = jax.vmap(_per_obs)(obs_values) + return cond_means_per_obs, cond_chol, log_margs + + cond_means, cond_chols, log_marg_y = jax.vmap(_per_component)( + joint_means, joint_chols + ) + log_unnorms = log_marg_y + jnp.log(mixture_weights + 1e-300)[:, None] + + return cond_means, cond_chols, log_unnorms, mixture_weights + + +def _extract_chain_link_jax( + flat_params_t: Array, + meta: _PeriodMeta, +) -> ChainLink: + """JAX-pure construction of a ChainLink from period ``t``'s flat params. + + Mirrors ``transition_period._build_chain_link`` but parses the flat + params directly so the chain link's leaves are differentiable + components of ``flat_super``. Used by the inference sandwich code to + rebuild the chained sample on-demand inside the period-`t` likelihood, + keeping the autodiff DAG intact across periods. 
+ """ + parsed = _parse_transition_params( + flat_params_t, + meta.parse_kwargs["n_state_factors"], + meta.parse_kwargs["n_endogenous_factors"], + meta.parse_kwargs["n_measures"], + meta.parse_kwargs["n_controls"], + meta.parse_kwargs["total_n_transition_params"], + meta.parse_kwargs["total_n_inv_params"], + meta.parse_kwargs["n_inv_eq_params_per"], + n_shock_factors=meta.parse_kwargs["n_shock_factors"], + ) + return ChainLink( + period=meta.period, + transition_func=meta.propagation["combined_transition"], + transition_params=parsed["transition_params"], + shock_sds=parsed["shock_sds"], + shock_factor_indices=meta.propagation["shock_factor_indices"], + inv_eq_params=parsed["inv_eq_params"], + inv_sds=parsed["inv_sds"], + n_inv_eq_params_per=meta.parse_kwargs["n_inv_eq_params_per"], + obs_factor_values=meta.propagation["obs_factor_values"], + ) + + +def _extract_prev_meas_info_jax( + flat_params_prev: Array, + meta: _PeriodMeta, +) -> dict[str, Array]: + """JAX-pure extraction of ``prev_meas_info`` from a period's flat params.""" + if meta.is_initial: + parsed = _parse_initial_params( + flat_params_prev, + meta.parse_kwargs["n_factors"], + meta.parse_kwargs["n_mixture_components"], + meta.parse_kwargs["n_measures"], + meta.parse_kwargs["n_controls"], + ) + return { + "loadings_flat": parsed["loadings"], + "control_params": parsed["control_params"], + "meas_sds": parsed["meas_sds"], + } + parsed = _parse_transition_params( + flat_params_prev, + meta.parse_kwargs["n_state_factors"], + meta.parse_kwargs["n_endogenous_factors"], + meta.parse_kwargs["n_measures"], + meta.parse_kwargs["n_controls"], + meta.parse_kwargs["total_n_transition_params"], + meta.parse_kwargs["total_n_inv_params"], + meta.parse_kwargs["n_inv_eq_params_per"], + n_shock_factors=meta.parse_kwargs["n_shock_factors"], + ) + return { + "loadings_flat": parsed["loadings_flat"], + "control_params": parsed["control_params"], + "meas_sds": parsed["meas_sds"], + } + + +def _build_prev_dist_arrays( + flat_super: Array, + target_t: int, + metas: tuple[_PeriodMeta, ...], + cond_weights_override: Array | None = None, +) -> tuple[dict[str, Array], tuple[ChainLink, ...], Array]: + """Build the period-0 conditional payload and chain history for period ``t``. + + Replaces the previous static-sample carry-over with the joint-Halton + chain rebuild contract: the period-`t` likelihood expects + ``prev_dist_arrays`` (with cond_weights / cond_means / cond_chols), + a tuple of `ChainLink`s for the prior transition steps, and a + per-obs ``obs_factor_values_chain`` tensor. The chain rebuild + happens inside the integrand from a single joint Halton design. + + The autodiff DAG flows through each `ChainLink`'s leaves + (transition_params, shock_sds, inv_eq_params, inv_sds) which are + parsed from `flat_super`'s per-period slices. 
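+
+    Return:
+        Tuple of (prev_dist_arrays, chain_links, obs_factor_values_chain):
+        ``prev_dist_arrays`` maps ``cond_weights`` (n_obs, n_components),
+        ``cond_means`` (n_components, n_obs, n_state) and ``cond_chols``
+        (n_components, n_state, n_state); ``chain_links`` holds one
+        ``ChainLink`` per prior transition step; ``obs_factor_values_chain``
+        has shape (n_obs, n_chain_links, n_observed_factors).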
+ """ + meta0 = metas[0] + flat_params_0 = flat_super[meta0.slice_start : meta0.slice_stop] + cond_means, cond_chols, log_unnorms, mixture_weights = ( + _build_initial_state_cond_dist_jax(flat_params_0, meta0) + ) + + chain_links: list[ChainLink] = [] + for s in range(1, target_t): + meta_s = metas[s] + flat_params_s = flat_super[meta_s.slice_start : meta_s.slice_stop] + chain_links.append(_extract_chain_link_jax(flat_params_s, meta_s)) + + if cond_weights_override is not None: + cond_weights = cond_weights_override + elif meta0.n_observed_factors > 0: + cond_weights = jax.nn.softmax(log_unnorms, axis=0).T + else: + meta_target = metas[target_t] + n_obs = int(meta_target.loglike_kwargs["measurements"].shape[0]) + n_components = metas[0].n_components + cond_weights = jnp.broadcast_to(mixture_weights[None, :], (n_obs, n_components)) + + prev_dist_arrays = { + "cond_weights": cond_weights, + "cond_means": cond_means, + "cond_chols": cond_chols, + } + + # Per-obs observed factor values at each chain link's source period. + meta_target = metas[target_t] + n_obs = int(meta_target.loglike_kwargs["measurements"].shape[0]) + n_obs_factors = meta0.n_observed_factors + if not chain_links: + obs_factor_values_chain = jnp.zeros((n_obs, 0, n_obs_factors)) + else: + obs_factor_values_chain = jnp.stack( + [link.obs_factor_values for link in chain_links], axis=1 + ) + + return prev_dist_arrays, tuple(chain_links), obs_factor_values_chain + + +def _period_t_per_obs_loglike_full( + flat_super: Array, + t: int, + metas: tuple[_PeriodMeta, ...], +) -> Array: + """Per-obs loglike for period ``t`` as a function of the full flat vector.""" + meta_t = metas[t] + flat_params_t = flat_super[meta_t.slice_start : meta_t.slice_stop] + if meta_t.is_initial: + return af_per_obs_loglike_initial(flat_params_t, **meta_t.loglike_kwargs) + + # Reuse the baked cond_weights from the meta (it was built via the same + # ``_prepare_transition_inputs`` path as estimation and already honours + # any stored ``conditional_weights``; when ``conditional_weights`` is + # ``None`` it is a broadcast of the initial-period mixture weights). + stored_cond_weights = meta_t.loglike_kwargs["prev_distribution"]["cond_weights"] + prev_dist_arrays, chain_links, obs_factor_values_chain = _build_prev_dist_arrays( + flat_super, t, metas, cond_weights_override=stored_cond_weights + ) + meta_prev = metas[t - 1] + flat_params_prev = flat_super[meta_prev.slice_start : meta_prev.slice_stop] + prev_meas = _extract_prev_meas_info_jax(flat_params_prev, meta_prev) + + kwargs = dict(meta_t.loglike_kwargs) + kwargs["prev_distribution"] = prev_dist_arrays + kwargs["chain_links"] = chain_links + kwargs["obs_factor_values_chain"] = obs_factor_values_chain + kwargs["prev_loadings_flat"] = prev_meas["loadings_flat"] + kwargs["prev_control_params"] = prev_meas["control_params"] + kwargs["prev_meas_sds"] = prev_meas["meas_sds"] + return af_per_obs_loglike_transition(flat_params_t, **kwargs) + + +__all__ = [ + "AFInferenceResult", + "compute_af_standard_errors", +] diff --git a/src/skillmodels/af/initial_period.py b/src/skillmodels/af/initial_period.py new file mode 100644 index 00000000..511f7136 --- /dev/null +++ b/src/skillmodels/af/initial_period.py @@ -0,0 +1,676 @@ +"""Step 0 of the AF estimator: initial period estimation. + +Estimate the joint distribution of latent factors at period 0 and the +measurement system parameters, using a mixture-of-normals model with +Halton quadrature for numerical integration. 
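+
+The public entry point is `estimate_initial_period`; the remaining functions
+are private helpers for parameter initialization and for extracting the
+period-0 conditional distribution.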
+""" + +import jax +import jax.numpy as jnp +import numpy as np +import optimagic as om +import pandas as pd +from jax import Array + +from skillmodels.af.batching import auto_n_obs_per_batch +from skillmodels.af.halton import create_halton_nodes_and_weights +from skillmodels.af.likelihood import ( + _log_mvn_pdf_chol, + af_loglike_initial, + create_loglike_and_gradient, +) +from skillmodels.af.params import ( + apply_fixed_params, + apply_start_params, + build_optimagic_inputs, + create_af_params_template, + get_initial_period_params_index, + get_measurements_per_factor, + get_normalizations_for_period, +) +from skillmodels.af.types import ( + AFEstimationOptions, + AFPeriodResult, + ConditionalDistribution, + MixtureComponent, +) +from skillmodels.amn.moments import spearman_factor_moments +from skillmodels.common.model_spec import ModelSpec +from skillmodels.common.types import ProcessedModel + + +def estimate_initial_period( + model_spec: ModelSpec, + processed_model: ProcessedModel, + measurements: Array, + controls: Array, + af_options: AFEstimationOptions, + state_factors: tuple[str, ...] | None = None, + start_params: pd.DataFrame | None = None, + fixed_params: pd.DataFrame | None = None, + observed_factors: tuple[str, ...] = (), + observed_factor_values: Array | None = None, +) -> tuple[AFPeriodResult, ConditionalDistribution]: + """Estimate the initial period (Step 0) of the AF procedure. + + Fit a mixture-of-normals distribution for the joint vector of latent + factors (and, optionally, observed factors) at period 0, together with + the measurement system parameters, via MLE with Halton quadrature. + + When `observed_factors` is non-empty, the joint distribution is modelled + over (latent, observed) and per-individual observed values are used to + condition the Halton draws via the Schur complement. This concentrates + nodes on the region of latent space consistent with each individual's + observed data, improving quadrature precision. + + Args: + model_spec: Model specification. + processed_model: Processed model from `process_model()`. + measurements: Shape (n_obs, n_measures), period 0 measurement values. + controls: Shape (n_obs, n_controls), period 0 control values. + af_options: AF estimation options. + state_factors: Subset of latent factors used as state factors for + AF propagation. If `None`, all latent factors are used. + start_params: Optional starting values. Matching index entries + override heuristic defaults. + fixed_params: Optional DataFrame with a "value" column pinning + specified parameters (value + bounds both clamped to the value). + observed_factors: Names of observed factors included in the joint + initial distribution. Defaults to empty. + observed_factor_values: Shape (n_obs, n_observed_factors) array of + observed factor values. Required iff `observed_factors` is + non-empty. + + Return: + Tuple of (AFPeriodResult, ConditionalDistribution) where the + distribution represents the estimated f(theta_0 | data_0), restricted + to latent (or `state_factors`) coordinates. 
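+
+    Example:
+        A minimal usage sketch. It assumes `model_spec`, `processed_model`,
+        and the period-0 `measurements` / `controls` arrays were prepared
+        elsewhere, and that `AFEstimationOptions` can be constructed with
+        keyword defaults for every option not shown here::
+
+            opts = AFEstimationOptions(n_mixture_components=2)
+            period_result, cond_dist = estimate_initial_period(
+                model_spec,
+                processed_model,
+                measurements,
+                controls,
+                opts,
+            )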
+ + """ + n_components = af_options.n_mixture_components + factors = processed_model.labels.latent_factors + controls_names = processed_model.labels.controls + n_obs_factors = len(observed_factors) + + reconstructed_factors = tuple( + f for f in factors if not model_spec.factors[f].has_initial_distribution + ) + state_latent_factors = tuple(f for f in factors if f not in reconstructed_factors) + n_state_latent = len(state_latent_factors) + n_joint = n_state_latent + n_obs_factors + + if n_obs_factors > 0 and observed_factor_values is None: + msg = "observed_factor_values required when observed_factors is non-empty." + raise ValueError(msg) + obs_values = ( + observed_factor_values + if observed_factor_values is not None + else jnp.zeros((measurements.shape[0], 0)) + ) + + # Build parameter index and template + measurements_p0 = get_measurements_per_factor(model_spec.factors, period=0) + params_index = get_initial_period_params_index( + n_mixture_components=n_components, + latent_factors=factors, + measurements_period_0=measurements_p0, + controls=controls_names, + observed_factors=observed_factors, + reconstructed_factors=reconstructed_factors, + ) + normalizations = get_normalizations_for_period(model_spec.factors, period=0) + params_template = create_af_params_template( + params_index, + normalizations, + period=0, + ) + + # Initialize parameters via simple heuristics + params_template = _initialize_params_heuristic( + params_template, + measurements, + controls, + n_state_latent, + n_components, + observed_factors=observed_factors, + observed_factor_values=obs_values, + ) + + # Optionally override SDs / loadings / Cholesky diagonals via Spearman + # moments. This places the optimizer near the strongly-identified MLE + # neighborhood instead of at the static default 0.5 / obs_sd*0.5; for + # parameters on weakly-identified ridges (notably sigma_inv vs sigma_meas) the + # moment-based seed is the difference between converging at truth and + # drifting to the boundary. + if af_options.initialization_strategy == "spearman": + all_measures_full = _get_ordered_measures(measurements_p0) + params_template = _apply_moment_based_overrides_initial( + params_template, + measurements, + measurements_per_factor=measurements_p0, + all_measures=all_measures_full, + normalizations=normalizations, + n_components=n_components, + ) + + # Override with user-supplied starting values where available + if start_params is not None: + apply_start_params(params_template, start_params) + + # Align template values with user-supplied fixes (bounds are not clamped; + # pinning happens via FixedConstraintWithValue further below). + if fixed_params is not None: + apply_fixed_params(params_template, fixed_params) + + # Period-0 measurements and loading mask cover state-latent factors only. + # Reconstructed factors' period-0 measurements are handled in the + # transition step 0->1. 
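+    # (Illustration with hypothetical factor names: if the model has factors
+    # ("cog", "inv") and "inv" has ``has_initial_distribution=False``, only
+    # the measurement columns belonging to "cog" are kept below.)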
+ measurements_p0_filtered = { + f: m for f, m in measurements_p0.items() if f in state_latent_factors + } + all_measures_full = _get_ordered_measures(measurements_p0) + all_measures = _get_ordered_measures(measurements_p0_filtered) + if len(all_measures) != len(all_measures_full): + col_indices = jnp.array( + [all_measures_full.index(m) for m in all_measures], dtype=jnp.int32 + ) + measurements = measurements[:, col_indices] + loading_mask = _build_loading_mask( + all_measures, state_latent_factors, measurements_p0_filtered + ) + + # Halton quadrature nodes: dimension equals the state-latent count + # (observed factors are conditioned on, not integrated over, via the + # Schur complement). + nodes, weights = create_halton_nodes_and_weights( + af_options.n_halton_points, + n_state_latent, + ) + + # Translate normalization fixes and user-supplied fixes into FixedConstraints + # so they compose with other constraints (e.g. ProbabilityConstraint). + full_params_df, fixed_constraints = build_optimagic_inputs( + params_template, fixed_params + ) + + n_obs_per_batch = af_options.n_obs_per_batch + if n_obs_per_batch is None: + n_obs_per_batch = auto_n_obs_per_batch( + n_obs=int(measurements.shape[0]), + n_halton_points=af_options.n_halton_points, + n_halton_points_shock=af_options.n_halton_points_shock, + n_latent=n_joint, + n_endogenous=0, + ) + + loglike_kwargs = { + "n_factors": n_joint, + "n_latent_factors": n_state_latent, + "n_mixture_components": n_components, + "n_measures": len(all_measures), + "n_controls": len(controls_names), + "measurements": measurements, + "controls": controls, + "observed_factor_values": obs_values, + "loading_mask": jnp.array(loading_mask), + "nodes": nodes, + "weights": weights, + "stability_floor": af_options.stability_floor, + "n_obs_per_batch": n_obs_per_batch, + } + + loglike_and_grad = create_loglike_and_gradient( + af_loglike_initial, + **loglike_kwargs, + ) + + def fun(params_df: pd.DataFrame) -> float: + val, _grad = loglike_and_grad(jnp.array(params_df["value"].to_numpy())) + return float(val) + + def fun_and_jac(params_df: pd.DataFrame) -> tuple[float, np.ndarray]: + val, grad = loglike_and_grad(jnp.array(params_df["value"].to_numpy())) + return float(val), np.array(grad) + + opt_res = om.minimize( + fun=fun, + params=full_params_df[["value"]], + algorithm=af_options.optimizer_algorithm, + bounds=om.Bounds( + lower=full_params_df["lower_bound"], + upper=full_params_df["upper_bound"], + ), + constraints=list(fixed_constraints) or None, + fun_and_jac=fun_and_jac, + **dict(af_options.optimizer_options), + ) + + # Write optimized values back into full template + result_params = params_template.copy() + result_params["value"] = opt_res.params["value"].to_numpy() + + # Extract conditional distribution (state factors only for AF propagation), + # building the per-obs importance sample of skills_0 from the same Halton + # design used for the optimization. 
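+    # Shapes of the returned ConditionalDistribution (see the helper below):
+    # samples_per_component is a length-n_components tuple of
+    # (n_halton, n_obs, n_state) arrays, conditional_weights is
+    # (n_obs, n_components), cond_means is (n_components, n_obs, n_state),
+    # and cond_chols is (n_components, n_state, n_state).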
+ sf = state_factors if state_factors is not None else factors + cond_dist = _extract_conditional_distribution( + result_params, + len(sf), + n_components, + sf, + nodes=nodes, + observed_factor_values=obs_values, + ) + + period_result = AFPeriodResult( + period=0, + params=result_params, + loglikelihood=-float(opt_res.fun), + success=bool(opt_res.success), + optimize_result=opt_res, + ) + + return period_result, cond_dist + + +def _get_ordered_measures( + measurements_per_factor: dict[str, tuple[str, ...]], +) -> list[str]: + """Get all measurement variables in a deterministic order.""" + seen: set[str] = set() + result: list[str] = [] + for measures in measurements_per_factor.values(): + for m in measures: + if m not in seen: + seen.add(m) + result.append(m) + return result + + +def _build_loading_mask( + all_measures: list[str], + factors: tuple[str, ...], + measurements_per_factor: dict[str, tuple[str, ...]], +) -> np.ndarray: + """Build boolean mask for which (measure, factor) pairs have loadings.""" + n_measures = len(all_measures) + n_factors = len(factors) + mask = np.zeros((n_measures, n_factors), dtype=bool) + meas_idx = {m: i for i, m in enumerate(all_measures)} + fac_idx = {f: i for i, f in enumerate(factors)} + for factor, measures in measurements_per_factor.items(): + fi = fac_idx[factor] + for m in measures: + mi = meas_idx[m] + mask[mi, fi] = True + return mask + + +def _initialize_params_heuristic( + params_template: pd.DataFrame, + measurements: Array, + _controls: Array, + _n_factors: int, + n_components: int, + observed_factors: tuple[str, ...] = (), + observed_factor_values: Array | None = None, +) -> pd.DataFrame: + """Initialize parameters using simple heuristics. + + Use measurement means and variances to set reasonable starting values + for mixture means, variances, loadings, and measurement SDs. When + observed factors are present, their means come from sample means and + their Cholesky diagonals from sample SDs. 
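+
+    Concretely (see the body below): mixture weights are uniform, component
+    means are spread around the sample mean in steps of half a sample SD,
+    Cholesky diagonals default to half the first measurement's SD (or the
+    observed factor's sample SD), measurement SDs to half each measure's
+    sample SD (floored at 0.01), free loadings to 1.0, and free constant
+    control coefficients to 0.0.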
+ """ + params = params_template.copy() + meas_np = np.array(measurements) + + # Overall mean and SD of first measurement as proxy for latent factor distribution + meas_mean = float(np.nanmean(meas_np[:, 0])) + meas_sd = float(np.nanstd(meas_np[:, 0])) + if meas_sd < 1e-8: + meas_sd = 1.0 + + obs_means, obs_sds = _observed_factor_stats( + observed_factors, observed_factor_values, n_rows=meas_np.shape[0] + ) + + # Set mixture weights to uniform + weight_mask = params.index.get_level_values("category") == "mixture_weights" + params.loc[weight_mask, "value"] = 1.0 / n_components + + _set_initial_mixture_means( + params, n_components, meas_mean, meas_sd, obs_means, obs_sds + ) + _set_initial_cholcov_diagonals(params, meas_sd, obs_sds) + + # Set measurement SDs to half the observed SD + sd_mask = params.index.get_level_values("category") == "meas_sds" + for i, idx in enumerate(params.index[sd_mask]): + obs_sd = float(np.nanstd(meas_np[:, i])) if i < meas_np.shape[1] else 1.0 + params.loc[idx, "value"] = max(obs_sd * 0.5, 0.01) + + # Set loadings to 1.0 (where not fixed) + load_mask = params.index.get_level_values("category") == "loadings" + for idx in params.index[load_mask]: + if params.loc[idx, "lower_bound"] != params.loc[idx, "upper_bound"]: + params.loc[idx, "value"] = 1.0 + + # Set control intercepts to measurement means (where not fixed) + ctrl_mask = params.index.get_level_values("category") == "controls" + for idx in params.index[ctrl_mask]: + if ( + idx[3] == "constant" + and params.loc[idx, "lower_bound"] != params.loc[idx, "upper_bound"] + ): + params.loc[idx, "value"] = 0.0 + + return params + + +def _set_initial_mixture_means( + params: pd.DataFrame, + n_components: int, + meas_mean: float, + meas_sd: float, + obs_means: dict[str, float], + obs_sds: dict[str, float], +) -> None: + """Set initial_states values in place: spread components around sample means.""" + mean_mask = params.index.get_level_values("category") == "initial_states" + mean_vals = params.loc[mean_mask, "value"].copy() + for idx in mean_vals.index: + comp = idx[2] + factor = idx[3] + component_offset = (int(comp.split("_")[1]) - (n_components - 1) / 2) * 0.5 + if factor in obs_means: + mean_vals.loc[idx] = obs_means[factor] + component_offset * obs_sds[factor] + else: + mean_vals.loc[idx] = meas_mean + component_offset * meas_sd + params.loc[mean_mask, "value"] = mean_vals + + +def _set_initial_cholcov_diagonals( + params: pd.DataFrame, + meas_sd: float, + obs_sds: dict[str, float], +) -> None: + """Set initial_cholcovs diagonals to factor sample SD, off-diags to 0.""" + chol_mask = params.index.get_level_values("category") == "initial_cholcovs" + for idx in params.index[chol_mask]: + parts = idx[3].split("-") + if len(parts) == 2 and parts[0] == parts[1]: + params.loc[idx, "value"] = obs_sds.get(parts[0], meas_sd * 0.5) + else: + params.loc[idx, "value"] = 0.0 + + +def _observed_factor_stats( + observed_factors: tuple[str, ...], + observed_factor_values: Array | None, + n_rows: int, +) -> tuple[dict[str, float], dict[str, float]]: + """Return per-observed-factor sample means and SDs (SDs clipped to >= 0.01).""" + obs_vals_np = ( + np.array(observed_factor_values) + if observed_factor_values is not None + else np.zeros((n_rows, 0)) + ) + obs_means = { + factor: float(np.nanmean(obs_vals_np[:, i])) + for i, factor in enumerate(observed_factors) + } + obs_sds = { + factor: max(float(np.nanstd(obs_vals_np[:, i])), 0.01) + for i, factor in enumerate(observed_factors) + } + return obs_means, obs_sds + + +def 
_extract_conditional_distribution( # noqa: PLR0915 + params: pd.DataFrame, + _n_factors: int, + n_components: int, + factors: tuple[str, ...], + nodes: Array, + observed_factor_values: Array, +) -> ConditionalDistribution: + """Extract the initial distribution and build the period-0 importance sample. + + For each mixture component l, build a per-obs importance sample of + skills_0 of shape ``(n_halton, n_obs, n_state)``, conditional (where + applicable) on the observed factor values via the Schur complement. + Per-obs mixture weights `p(l | Y_i)` are computed by Bayes' rule from + the marginal density of Y_i under each component. + + These samples are propagated forward across periods (rather than being + re-collapsed to a Gaussian mixture and re-drawn freshly) so the + non-Gaussian shape of skills_t survives transitions through the CES + production function. + """ + # Mixture weights + weight_mask = params.index.get_level_values("category") == "mixture_weights" + weights_raw = jnp.array(params.loc[weight_mask, "value"].to_numpy()) + weights = weights_raw / weights_raw.sum() + + # Determine joint factor ordering from the stored initial_states entries + joint_factors = _get_joint_factors_in_order(params, n_components) + n_state = len(factors) + n_obs = int(observed_factor_values.shape[0]) + n_obs_factors = int(observed_factor_values.shape[1]) + + # Indices into joint_factors: + # - target_idx: positions of `factors` (the state factors we want samples for). + # - obs_idx: positions of observed factors at the joint's tail. + # Joint stores (state_latent_factors, observed_factors) in that order. + target_idx = jnp.array([joint_factors.index(f) for f in factors], dtype=jnp.int32) + obs_idx = jnp.array( + [ + joint_factors.index(joint_factors[len(joint_factors) - n_obs_factors + k]) + for k in range(n_obs_factors) + ], + dtype=jnp.int32, + ) + + components: list[MixtureComponent] = [] + samples_per_component: list[Array] = [] + log_unnorm_weights_per_component: list[Array] = [] + cond_means_per_component: list[Array] = [] + cond_chols_per_component: list[Array] = [] + + for m in range(n_components): + joint_mean = jnp.array( + [ + float(params.loc[("initial_states", 0, f"mixture_{m}", fac), "value"]) # ty: ignore[invalid-argument-type] + for fac in joint_factors + ] + ) + joint_chol = _assemble_joint_chol(params, joint_factors, m) + joint_cov = joint_chol @ joint_chol.T + + mu_theta = joint_mean[target_idx] + cov_tt = joint_cov[target_idx[:, None], target_idx[None, :]] + + if n_obs_factors == 0: + sub_mean = mu_theta + sub_chol = jnp.linalg.cholesky(cov_tt + 1e-10 * jnp.eye(n_state)) + z_for_state = nodes[:, :n_state] + per_node = sub_mean[None, :] + z_for_state @ sub_chol.T + samples = jnp.broadcast_to( + per_node[:, None, :], (nodes.shape[0], n_obs, n_state) + ) + log_unnorm = jnp.full((n_obs,), float(jnp.log(weights[m] + 1e-300))) + # Per-obs cond_means broadcast (n_obs, n_state); shared chol. 
+ cond_means_obs = jnp.broadcast_to(sub_mean[None, :], (n_obs, n_state)) + cond_chol_comp = sub_chol + else: + mu_y = joint_mean[obs_idx] + cov_ty = joint_cov[target_idx[:, None], obs_idx[None, :]] + cov_yy = joint_cov[obs_idx[:, None], obs_idx[None, :]] + + chol_yy = jnp.linalg.cholesky(cov_yy) + solve_tt = jax.scipy.linalg.cho_solve((chol_yy, True), cov_ty.T) + cond_cov = cov_tt - cov_ty @ solve_tt + 1e-10 * jnp.eye(n_state) + cond_chol = jnp.linalg.cholesky(cond_cov) + + def _per_obs( + y_i: Array, + chol_yy: Array = chol_yy, + mu_y: Array = mu_y, + mu_theta: Array = mu_theta, + cov_ty: Array = cov_ty, + ) -> tuple[Array, Array]: + alpha = jax.scipy.linalg.cho_solve((chol_yy, True), y_i - mu_y) + cond_mean = mu_theta + cov_ty @ alpha + log_marg_y = _log_mvn_pdf_chol(y_i, mu_y, chol_yy) + return cond_mean, log_marg_y + + cond_means, log_margs = jax.vmap(_per_obs)(observed_factor_values) + z_for_state = nodes[:, :n_state] + samples = cond_means[None, :, :] + (z_for_state @ cond_chol.T)[:, None, :] + sub_mean = mu_theta + sub_chol = cond_chol + log_unnorm = jnp.log(weights[m] + 1e-300) + log_margs + cond_means_obs = cond_means + cond_chol_comp = cond_chol + + components.append(MixtureComponent(mean=sub_mean, chol_cov=sub_chol)) + samples_per_component.append(samples) + log_unnorm_weights_per_component.append(log_unnorm) + cond_means_per_component.append(cond_means_obs) + cond_chols_per_component.append(cond_chol_comp) + + if n_obs_factors > 0: + log_w_stack = jnp.stack( + log_unnorm_weights_per_component, axis=-1 + ) # (n_obs, n_components) + cond_weights = jax.nn.softmax(log_w_stack, axis=-1) + else: + cond_weights = jnp.broadcast_to(weights[None, :], (n_obs, n_components)) + + return ConditionalDistribution( + mixture_weights=weights, + components=tuple(components), + samples_per_component=tuple(samples_per_component), + conditional_weights=cond_weights, + cond_means=jnp.stack(cond_means_per_component, axis=0), + cond_chols=jnp.stack(cond_chols_per_component, axis=0), + ) + + +def _get_joint_factors_in_order( + params: pd.DataFrame, + n_components: int, +) -> tuple[str, ...]: + """Return the joint factor ordering used in initial_states entries.""" + mask = (params.index.get_level_values("category") == "initial_states") & ( + params.index.get_level_values("name1") == f"mixture_{n_components - 1}" + ) + del n_components + return tuple(params.loc[mask].index.get_level_values("name2")) + + +def _assemble_joint_chol( + params: pd.DataFrame, + joint_factors: tuple[str, ...], + component: int, +) -> Array: + """Build the lower-triangular joint Cholesky matrix for one component.""" + n = len(joint_factors) + chol = jnp.zeros((n, n)) + for row, f1 in enumerate(joint_factors): + for col, f2 in enumerate(joint_factors): + if col <= row: + loc = ("initial_cholcovs", 0, f"mixture_{component}", f"{f1}-{f2}") + val = float(params.loc[loc, "value"]) # ty: ignore[invalid-argument-type] + chol = chol.at[row, col].set(val) # noqa: PD008 + return chol + + +def _apply_moment_based_overrides_initial( # noqa: C901, PLR0912 + params: pd.DataFrame, + measurements: Array, + measurements_per_factor: dict[str, tuple[str, ...]], + all_measures: list[str], + normalizations: dict[str, dict[tuple[str, str], float]], + n_components: int, +) -> pd.DataFrame: + """Override static initialization with Spearman cross-cov moments. 
+ + For each latent factor with at least two period-0 measurements, apply + `spearman_factor_moments` to the corresponding columns of + `measurements` and write the recovered loadings, sigma_meas, and per-component + Cholesky-diagonal sqrt(Var(F)) values into `params`. Skip rows where + `lower_bound == upper_bound` (i.e. user normalizations or fixed + constraints). + + The anchor measurement is determined from `normalizations["loadings"]` + when a loading is pinned for the factor; otherwise the first measurement + is the anchor. + """ + out = params.copy() + meas_np = np.array(measurements) + n_obs = meas_np.shape[0] + if n_obs == 0: + return out + meas_index = {m: i for i, m in enumerate(all_measures)} + loading_norms = normalizations.get("loadings", {}) + + for factor, factor_meas in measurements_per_factor.items(): + if len(factor_meas) < 2: + continue + cols = [meas_index[m] for m in factor_meas if m in meas_index] + if len(cols) < 2: + continue + sub = meas_np[:, cols] + + # Anchor: pick the measurement whose loading is pinned for this + # factor, falling back to the first measurement. + anchor_loading = 1.0 + anchor_local = 0 + for local_idx, meas_name in enumerate(factor_meas): + if (meas_name, factor) in loading_norms: + anchor_local = local_idx + anchor_loading = float(loading_norms[(meas_name, factor)]) + break + + result = spearman_factor_moments( + sub, + anchor_idx=anchor_local, + anchor_loading=anchor_loading, + ) + if not result.valid: + continue + + # Override loadings (skip pinned rows). + for local_idx, meas_name in enumerate(factor_meas): + loc = ("loadings", 0, meas_name, factor) + if loc not in out.index: + continue + if out.loc[loc, "lower_bound"] != out.loc[loc, "upper_bound"]: + out.loc[loc, "value"] = float(result.loadings[local_idx]) + + # Override measurement SDs (skip pinned rows). + for local_idx, meas_name in enumerate(factor_meas): + loc = ("meas_sds", 0, meas_name, "-") + if loc not in out.index: + continue + if out.loc[loc, "lower_bound"] != out.loc[loc, "upper_bound"]: + out.loc[loc, "value"] = float(result.meas_sds[local_idx]) + + # Override per-component Cholesky diagonal for this factor with + # sqrt(Var(F)). Off-diagonals stay at 0 (set by the heuristic). + sd_factor = float(np.sqrt(max(result.latent_var, 1e-12))) + for comp in range(n_components): + loc = ( + "initial_cholcovs", + 0, + f"mixture_{comp}", + f"{factor}-{factor}", + ) + if loc not in out.index: + continue + if out.loc[loc, "lower_bound"] != out.loc[loc, "upper_bound"]: + out.loc[loc, "value"] = sd_factor + + return out diff --git a/src/skillmodels/af/likelihood.py b/src/skillmodels/af/likelihood.py new file mode 100644 index 00000000..10506e71 --- /dev/null +++ b/src/skillmodels/af/likelihood.py @@ -0,0 +1,1262 @@ +"""JAX-based likelihood functions for AF estimation. + +All functions are JAX-compatible (jittable, differentiable via jax.grad). 
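+
+Naming convention: ``af_loglike_*`` functions return the scalar negative mean
+log-likelihood consumed by the optimizer, ``af_per_obs_loglike_*`` functions
+return the per-observation vector used for score-based inference, and
+leading-underscore helpers are internal.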
+""" + +import functools +from collections.abc import Callable +from typing import Any + +import jax +import jax.numpy as jnp +from jax import Array + +from skillmodels.af.types import ChainLink + + +def af_per_obs_loglike_initial( + params: Array, + *, + n_factors: int, + n_mixture_components: int, + n_measures: int, + n_controls: int, + measurements: Array, + controls: Array, + loading_mask: Array, + nodes: Array, + weights: Array, + stability_floor: float, + n_latent_factors: int | None = None, + observed_factor_values: Array | None = None, + n_obs_per_batch: int | None = None, +) -> Array: + """Per-observation log-likelihood for the initial period (Step 0). + + Same inputs as `af_loglike_initial`; returns the shape-``(n_obs,)`` + vector of per-observation log-likelihoods instead of the aggregated + negative mean. Used for score-based inference. + """ + n_latent = n_factors if n_latent_factors is None else n_latent_factors + n_obs_factors = n_factors - n_latent + + parsed = _parse_initial_params( + params, + n_factors, + n_mixture_components, + n_measures, + n_controls, + ) + + if n_obs_factors == 0: + return _initial_loglike_per_obs( + mixture_weights=parsed["mixture_weights"], + mixture_means=parsed["mixture_means"], + mixture_chol_covs=parsed["mixture_chol_covs"], + control_params=parsed["control_params"], + loadings=parsed["loadings"], + meas_sds=parsed["meas_sds"], + measurements=measurements, + controls=controls, + loading_mask=loading_mask, + nodes=nodes, + weights=weights, + stability_floor=stability_floor, + n_obs_per_batch=n_obs_per_batch, + ) + assert observed_factor_values is not None # noqa: S101 + return _initial_loglike_per_obs_conditional( + mixture_weights=parsed["mixture_weights"], + mixture_means=parsed["mixture_means"], + mixture_chol_covs=parsed["mixture_chol_covs"], + control_params=parsed["control_params"], + loadings=parsed["loadings"], + meas_sds=parsed["meas_sds"], + measurements=measurements, + controls=controls, + observed_factor_values=observed_factor_values, + loading_mask=loading_mask, + nodes=nodes, + weights=weights, + n_latent=n_latent, + stability_floor=stability_floor, + n_obs_per_batch=n_obs_per_batch, + ) + + +def af_loglike_initial( + params: Array, + *, + n_factors: int, + n_mixture_components: int, + n_measures: int, + n_controls: int, + measurements: Array, + controls: Array, + loading_mask: Array, + nodes: Array, + weights: Array, + stability_floor: float, + n_latent_factors: int | None = None, + observed_factor_values: Array | None = None, + n_obs_per_batch: int | None = None, +) -> Array: + """Negative log-likelihood for the initial period (Step 0). + + Integrate over latent factors using Halton quadrature. + + When `n_latent_factors == n_factors` (no observed factors in the joint + distribution), the likelihood reduces to:: + + L_i = sum_q w_q * sum_l pi_l + * prod_m N(Z_{0,m,i} | c_m + lam_m' theta_q,l, sd_m) + + where theta_q,l = mu_l + L_l @ z_q. + + When `n_latent_factors < n_factors` (joint distribution over + (latent, observed)), for each individual i:: + + L_i = p(Y_i) * sum_q w_q * sum_l pi_{l|Y_i} + * prod_m N(Z_{0,m,i} | c_m + lam_m' theta_{q,l|Y_i}, sd_m) + + where theta_{q,l|Y_i} is drawn from the conditional N(mu_{theta|Y,l,i}, + Sigma_{theta|Y,l}) via the Schur complement, and pi_{l|Y_i} are the + posterior component weights given Y_i. + + Args: + params: Full parameter vector in template order. Fixed entries are + held constant by optimagic `FixedConstraint`s attached outside. 
+ n_factors: Number of factors in the joint initial distribution + (state latents + observed). Reconstructed factors + (``has_initial_distribution=False``) are excluded from this + count; their period-0 measurements are estimated in the + period 0->1 transition step instead. + n_mixture_components: Number of mixture components. + n_measures: Number of measurement variables in period 0. + n_controls: Number of control variables (including constant). + measurements: Shape (n_obs, n_measures), observed measurements. + controls: Shape (n_obs, n_controls), control variable values. + loading_mask: Shape (n_measures, n_state_latent), True where loading + exists. + nodes: Shape (n_nodes, n_state_latent), standard normal quadrature + nodes. + weights: Shape (n_nodes,), quadrature weights. + stability_floor: Small constant added for numerical stability. + n_latent_factors: Number of state latent factors in the mixture. + Defaults to ``n_factors`` when no observed factors are present. + observed_factor_values: Shape (n_obs, n_obs_factors), observed factor + values used for Schur-complement conditioning. Required when + ``n_latent_factors < n_factors``. + n_obs_per_batch: Observations per reverse-mode autodiff chunk. + ``None`` falls back to ``jax.vmap`` (single kernel); a positive + integer uses ``jax.lax.map`` so the backward-pass tape only + retains one chunk at a time. + + Return: + Scalar negative log-likelihood. + + """ + log_likes = af_per_obs_loglike_initial( + params, + n_factors=n_factors, + n_mixture_components=n_mixture_components, + n_measures=n_measures, + n_controls=n_controls, + measurements=measurements, + controls=controls, + loading_mask=loading_mask, + nodes=nodes, + weights=weights, + stability_floor=stability_floor, + n_latent_factors=n_latent_factors, + observed_factor_values=observed_factor_values, + n_obs_per_batch=n_obs_per_batch, + ) + return -jnp.mean(log_likes) + + +def _parse_initial_params( + params: Array, + n_factors: int, + n_mixture_components: int, + n_measures: int, + n_controls: int, +) -> dict[str, Array]: + """Parse flat parameter vector into structured initial-period params.""" + idx = 0 + + # Mixture weights + mixture_weights = params[idx : idx + n_mixture_components] + mixture_weights = mixture_weights / mixture_weights.sum() + idx += n_mixture_components + + # Mixture means: (n_components, n_factors) + n_mean = n_mixture_components * n_factors + mixture_means = params[idx : idx + n_mean].reshape(n_mixture_components, n_factors) + idx += n_mean + + # Mixture Cholesky covariances: (n_components, n_factors, n_factors) lower tri + n_chol = n_factors * (n_factors + 1) // 2 + mixture_chol_covs = jnp.zeros((n_mixture_components, n_factors, n_factors)) + for m in range(n_mixture_components): + chol_flat = params[idx : idx + n_chol] + idx += n_chol + chol = jnp.zeros((n_factors, n_factors)) + chol = chol.at[jnp.tril_indices(n_factors)].set(chol_flat) + mixture_chol_covs = mixture_chol_covs.at[m].set(chol) + + # Control params: (n_measures, n_controls) + n_ctrl = n_measures * n_controls + control_params = params[idx : idx + n_ctrl].reshape(n_measures, n_controls) + idx += n_ctrl + + # Loadings: (n_measures, n_factors) -- sparse, packed + n_loadings = int(params.shape[0]) - idx - n_measures + loadings_flat = params[idx : idx + n_loadings] + idx += n_loadings + + # Measurement SDs + meas_sds = params[idx : idx + n_measures] + + return { + "mixture_weights": mixture_weights, + "mixture_means": mixture_means, + "mixture_chol_covs": mixture_chol_covs, + "control_params": 
control_params, + "loadings": loadings_flat, + "meas_sds": meas_sds, + } + + +def _map_over_obs( + f: Callable, + *xs: Array, + n_obs_per_batch: int | None, +) -> Array: + """Map ``f`` over the leading axis of ``xs``, optionally in batches. + + When ``n_obs_per_batch`` is ``None`` or at least as large as the + leading axis, falls back to ``jax.vmap`` (single kernel). Otherwise + uses ``jax.lax.map`` so the reverse-mode autodiff tape only needs to + retain one chunk at a time. Combined with ``jax.checkpoint`` on + ``f``, this makes reverse-mode memory proportional to + ``n_obs_per_batch`` rather than to the full ``n_obs``. + """ + n_obs = xs[0].shape[0] + if n_obs_per_batch is None or n_obs_per_batch >= n_obs: + return jax.vmap(f)(*xs) + + def _tupled(args: tuple[Array, ...]) -> Array: + return f(*args) + + return jax.lax.map(_tupled, xs, batch_size=n_obs_per_batch) + + +def _initial_loglike_per_obs( + *, + mixture_weights: Array, + mixture_means: Array, + mixture_chol_covs: Array, + control_params: Array, + loadings: Array, + meas_sds: Array, + measurements: Array, + controls: Array, + loading_mask: Array, + nodes: Array, + weights: Array, + n_obs_per_batch: int | None = None, + stability_floor: float, +) -> Array: + """Compute log-likelihood for each observation at the initial period. + + Return: + Shape (n_obs,) log-likelihood per observation. + + """ + # Expand loadings into full matrix using mask + n_measures, n_factors = loading_mask.shape + full_loadings = jnp.zeros((n_measures, n_factors)) + full_loadings = full_loadings.at[loading_mask].set(loadings) + + # NaN-safety: build per-obs measurement mask and replace NaN entries + # with 0 so residuals stay finite. The mask is used inside the + # integral to zero out missing-measurement contributions. + meas_mask = jnp.isfinite(measurements) + safe_measurements = jnp.where(meas_mask, measurements, 0.0) + + # Control contribution: (n_obs, n_measures) + control_contrib = controls @ control_params.T + + # Residuals before factor contribution: (n_obs, n_measures) + residuals_base = safe_measurements - control_contrib + + @jax.checkpoint + def _single_obs_loglike(residual_base: Array, mask_i: Array) -> Array: + """Log-likelihood for a single observation, integrated over factors. + + `jax.checkpoint` keeps the forward pass small: the per-observation + quadrature tape is discarded and recomputed during the backward + pass, so reverse-mode autodiff memory scales with the per-obs + parameter footprint instead of ``n_obs * n_quadrature_nodes``. + """ + return _integrate_initial_single_obs( + residual_base=residual_base, + meas_mask=mask_i, + full_loadings=full_loadings, + meas_sds=meas_sds, + mixture_weights=mixture_weights, + mixture_means=mixture_means, + mixture_chol_covs=mixture_chol_covs, + nodes=nodes, + weights=weights, + stability_floor=stability_floor, + ) + + return _map_over_obs( + _single_obs_loglike, + residuals_base, + meas_mask, + n_obs_per_batch=n_obs_per_batch, + ) + + +def _initial_loglike_per_obs_conditional( + *, + mixture_weights: Array, + mixture_means: Array, + mixture_chol_covs: Array, + control_params: Array, + loadings: Array, + meas_sds: Array, + measurements: Array, + controls: Array, + observed_factor_values: Array, + loading_mask: Array, + nodes: Array, + weights: Array, + n_latent: int, + stability_floor: float, + n_obs_per_batch: int | None = None, +) -> Array: + """Per-observation log-likelihood with Schur-complement conditioning. 
+ + For each individual i with observed factors Y_i, the likelihood is:: + + L_i = p(Y_i) * integral p(Z_i | theta) p(theta | Y_i) dtheta + = sum_l pi_l N(Y_i | mu_Y_l, Sigma_YY_l) + * sum_q w_q prod_m N(residual_m | 0, sd_m) + + where theta is drawn from p(theta | Y_i, component l) using the + conditional mean and Cholesky factor derived from the joint + (latent, observed) covariance matrix via the Schur complement. + + Note the identity: combining the log-mixture over components l with + the measurement density gives an equivalent formulation where each + component's contribution is weighted by pi_l * N(Y_i | mu_Y_l, Sigma_YY_l). + + """ + n_measures = loading_mask.shape[0] + full_loadings = jnp.zeros((n_measures, n_latent)) + full_loadings = full_loadings.at[loading_mask].set(loadings) + + # NaN-safety for measurements (see `_initial_loglike_per_obs`). + meas_mask = jnp.isfinite(measurements) + safe_measurements = jnp.where(meas_mask, measurements, 0.0) + + control_contrib = controls @ control_params.T + residuals_base = safe_measurements - control_contrib + + @jax.checkpoint + def _single_obs_loglike(residual_base: Array, y_i: Array, mask_i: Array) -> Array: + return _integrate_initial_single_obs_conditional( + residual_base=residual_base, + y_i=y_i, + meas_mask=mask_i, + full_loadings=full_loadings, + meas_sds=meas_sds, + mixture_weights=mixture_weights, + mixture_means=mixture_means, + mixture_chol_covs=mixture_chol_covs, + nodes=nodes, + weights=weights, + n_latent=n_latent, + stability_floor=stability_floor, + ) + + return _map_over_obs( + _single_obs_loglike, + residuals_base, + observed_factor_values, + meas_mask, + n_obs_per_batch=n_obs_per_batch, + ) + + +def _integrate_initial_single_obs_conditional( + *, + residual_base: Array, + y_i: Array, + meas_mask: Array, + full_loadings: Array, + meas_sds: Array, + mixture_weights: Array, + mixture_means: Array, + mixture_chol_covs: Array, + nodes: Array, + weights: Array, + n_latent: int, + stability_floor: float, +) -> Array: + """Quadrature integration for one individual with observed-factor conditioning. + + Per component l: + - Split joint (mu, L) into latent and observed blocks. + - Compute marginal p(Y_i | l) from (mu_Y_l, L_Y_l). + - Compute conditional mean mu_{theta | Y_i, l} and Cholesky L_{theta | Y, l} + via Schur complement. + - Transform nodes: theta_q = mu_{theta|Y,l} + L_{theta|Y,l} @ z_q. + - Evaluate measurement density at theta_q, sum over quadrature. + + Aggregate with log-sum-exp over components. 
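+
+    In symbols, per observation ``i``::
+
+        log L_i = logsumexp_l [ log pi_l
+                                + log N(Y_i | mu_Y_l, Sigma_YY_l)
+                                + logsumexp_q ( log w_q
+                                                + sum_m log N(r_m | 0, sd_m) ) ]
+
+    where ``r_m`` is the residual of measurement ``m`` at node ``q`` under
+    component ``l``.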
+ """ + n_components = mixture_weights.shape[0] + + def _component_log_kernel(l_idx: Array) -> Array: + mu_full = mixture_means[l_idx] + chol_full = mixture_chol_covs[l_idx] + cov_full = chol_full @ chol_full.T + + mu_theta = mu_full[:n_latent] + mu_y = mu_full[n_latent:] + cov_tt = cov_full[:n_latent, :n_latent] + cov_ty = cov_full[:n_latent, n_latent:] + cov_yy = cov_full[n_latent:, n_latent:] + + # Marginal density of Y_i under component l + chol_yy = jnp.linalg.cholesky(cov_yy) + log_marg_y = _log_mvn_pdf_chol(y_i, mu_y, chol_yy) + + # Conditional mean and Cholesky of theta | Y_i + alpha = jax.scipy.linalg.cho_solve((chol_yy, True), (y_i - mu_y)) + cond_mean = mu_theta + cov_ty @ alpha + # Sigma_{theta|Y} = Sigma_tt - Sigma_ty Sigma_yy^{-1} Sigma_yt + solve_tt = jax.scipy.linalg.cho_solve((chol_yy, True), cov_ty.T) + cond_cov = cov_tt - cov_ty @ solve_tt + # Jitter for numerical stability before Cholesky + cond_cov = cond_cov + 1e-10 * jnp.eye(n_latent) + cond_chol = jnp.linalg.cholesky(cond_cov) + + def _log_node(z_q: Array) -> Array: + theta_q = cond_mean + cond_chol @ z_q + residuals = residual_base - full_loadings @ theta_q + log_pdf = _log_normal_pdf(residuals, jnp.zeros_like(residuals), meas_sds) + return jnp.sum(jnp.where(meas_mask, log_pdf, 0.0)) + + log_meas = jax.vmap(_log_node)(nodes) + log_integral = jax.scipy.special.logsumexp(log_meas + jnp.log(weights)) + + return ( + jnp.log(mixture_weights[l_idx] + stability_floor) + + log_marg_y + + log_integral + ) + + comp_log = jax.vmap(_component_log_kernel)(jnp.arange(n_components)) + return jax.scipy.special.logsumexp(comp_log) + + +def _log_mvn_pdf_chol(x: Array, mean: Array, chol: Array) -> Array: + """Log pdf of multivariate normal given the lower-triangular Cholesky.""" + diff = x - mean + sol = jax.scipy.linalg.solve_triangular(chol, diff, lower=True) + log_det = jnp.sum(jnp.log(jnp.diag(chol))) + k = x.shape[0] + return -0.5 * k * jnp.log(2 * jnp.pi) - log_det - 0.5 * jnp.dot(sol, sol) + + +def _integrate_initial_single_obs( + *, + residual_base: Array, + meas_mask: Array, + full_loadings: Array, + meas_sds: Array, + mixture_weights: Array, + mixture_means: Array, + mixture_chol_covs: Array, + nodes: Array, + weights: Array, + stability_floor: float, +) -> Array: + """Quadrature integration for one observation at the initial period. + + For each quadrature node z_q and mixture component l:: + + theta_q,l = mu_l + L_l @ z_q + kernel = pi_l * N(theta_q,l | mu_l, Sigma_l) + * prod_m N(obs_m | loading_m' theta_q,l, sd_m^2) + + Since z_q is standard normal and we transform + theta = mu_l + L_l @ z_q, the density of the mixture at theta is + already accounted for by the quadrature (importance sampling with + the mixture as proposal). 
So we just need:: + + kernel = sum_l pi_l * |L_l| + * prod_m N(obs_m | loading_m' (mu_l + L_l @ z_q), + sd_m^2) + + But with Halton nodes from N(0,I), the correct formula is:: + + L_i = sum_q w_q * sum_l pi_l + * prod_m N(residual_m + - loading_m' (mu_l + L_l z_q), 0, sd_m) + + """ + n_components = mixture_weights.shape[0] + + def _node_contribution(z_q: Array) -> Array: + """Contribution from one quadrature node.""" + total = jnp.array(0.0) + + for l_idx in range(n_components): + # Transform node to factor space for component l + theta_q = mixture_means[l_idx] + mixture_chol_covs[l_idx] @ z_q + + # Measurement residuals: obs - control_contrib - loadings @ theta + residuals = residual_base - full_loadings @ theta_q + + # Log measurement density: sum of log N(residual_m, 0, sd_m), + # masking out missing measurements (NaN replaced by 0 upstream). + log_pdf = _log_normal_pdf(residuals, jnp.zeros_like(residuals), meas_sds) + log_meas_density = jnp.sum(jnp.where(meas_mask, log_pdf, 0.0)) + + total = total + mixture_weights[l_idx] * jnp.exp(log_meas_density) + + return total + + # Integrate over quadrature nodes + contributions = jax.vmap(_node_contribution)(nodes) + integrated = jnp.dot(weights, contributions) + + return jnp.log(integrated + stability_floor) + + +def af_per_obs_loglike_transition( + params: Array, + *, + n_state_factors: int, + n_endogenous_factors: int, + n_measures: int, + n_controls: int, + measurements: Array, + controls: Array, + loading_mask: Array, + prev_measurements: Array, + prev_controls: Array, + prev_loading_mask: Array, + prev_control_params: Array, + prev_loadings_flat: Array, + prev_meas_sds: Array, + prev_distribution: dict[str, Array], + chain_links: tuple[ChainLink, ...], + obs_factor_values_chain: Array, + joint_nodes: Array, + joint_weights: Array, + transition_func: Callable, + total_n_transition_params: int, + total_n_inv_params: int, + n_inv_eq_params_per: int, + observed_factor_values: Array, + stability_floor: float, + state_factor_indices_in_latent: Array | None = None, + n_shock_factors: int | None = None, + shock_factor_indices: Array | None = None, + n_obs_per_batch: int | None = None, +) -> Array: + """Per-observation log-likelihood for a transition period (Step t). + + Same inputs as `af_loglike_transition`; returns the shape-``(n_obs,)`` + vector of per-observation log-likelihoods instead of the aggregated + negative mean. Used for score-based inference. + """ + effective_n_shock = n_state_factors if n_shock_factors is None else n_shock_factors + if shock_factor_indices is None: + shock_factor_indices = jnp.arange(effective_n_shock) + if state_factor_indices_in_latent is None: + # Default: assume state factors precede endogenous factors in the + # latent-factor ordering (the existing convention). Callers that + # don't follow that convention must pass explicit indices. 
+ state_factor_indices_in_latent = jnp.arange(n_state_factors) + + parsed = _parse_transition_params( + params, + n_state_factors, + n_endogenous_factors, + n_measures, + n_controls, + total_n_transition_params, + total_n_inv_params, + n_inv_eq_params_per, + n_shock_factors=effective_n_shock, + ) + + n_prev_measures = prev_loading_mask.shape[0] + n_prev_factors = prev_loading_mask.shape[1] + prev_full_loadings = jnp.zeros((n_prev_measures, n_prev_factors)) + prev_full_loadings = prev_full_loadings.at[prev_loading_mask].set( + prev_loadings_flat + ) + prev_control_contrib = prev_controls @ prev_control_params.T + # NaN-safety for prev-period measurements (see `_initial_loglike_per_obs`). + prev_meas_mask = jnp.isfinite(prev_measurements) + safe_prev_measurements = jnp.where(prev_meas_mask, prev_measurements, 0.0) + prev_residuals_base = safe_prev_measurements - prev_control_contrib + + return _transition_loglike_per_obs( + transition_params=parsed["transition_params"], + shock_sds=parsed["shock_sds"], + inv_eq_params=parsed["inv_eq_params"], + inv_sds=parsed["inv_sds"], + control_params=parsed["control_params"], + loadings_flat=parsed["loadings_flat"], + meas_sds=parsed["meas_sds"], + measurements=measurements, + controls=controls, + loading_mask=loading_mask, + prev_residuals_base=prev_residuals_base, + prev_meas_mask=prev_meas_mask, + prev_full_loadings=prev_full_loadings, + prev_meas_sds=prev_meas_sds, + prev_distribution=prev_distribution, + chain_links=chain_links, + obs_factor_values_chain=obs_factor_values_chain, + joint_nodes=joint_nodes, + joint_weights=joint_weights, + transition_func=transition_func, + n_state_factors=n_state_factors, + n_endogenous_factors=n_endogenous_factors, + n_shock_factors=effective_n_shock, + shock_factor_indices=shock_factor_indices, + state_factor_indices_in_latent=state_factor_indices_in_latent, + observed_factor_values=observed_factor_values, + stability_floor=stability_floor, + n_obs_per_batch=n_obs_per_batch, + ) + + +def af_loglike_transition( + params: Array, + *, + n_state_factors: int, + n_endogenous_factors: int, + n_measures: int, + n_controls: int, + measurements: Array, + controls: Array, + loading_mask: Array, + prev_measurements: Array, + prev_controls: Array, + prev_loading_mask: Array, + prev_control_params: Array, + prev_loadings_flat: Array, + prev_meas_sds: Array, + prev_distribution: dict[str, Array], + chain_links: tuple[ChainLink, ...], + obs_factor_values_chain: Array, + joint_nodes: Array, + joint_weights: Array, + transition_func: Callable, + total_n_transition_params: int, + total_n_inv_params: int, + n_inv_eq_params_per: int, + observed_factor_values: Array, + stability_floor: float, + state_factor_indices_in_latent: Array | None = None, + n_shock_factors: int | None = None, + shock_factor_indices: Array | None = None, + n_obs_per_batch: int | None = None, +) -> Array: + """Negative log-likelihood for a transition period (Step t). + + Integrate over latent factors at period t-1 and production shocks + via a single joint Halton design covering ALL randomness needed at + this step (mirroring MATLAB's ``create_nodes_weights_01/12``): + + * the period-0 latent draw ``z_state`` (shared across mixture comps) + * one ``z_inv`` and one ``z_P`` per prior chain step (periods 1..t-1) + * one ``z_inv`` and one ``z_P`` for the current step (t-1)→t + + The chained sample θ_0 → θ_{t-1} is rebuilt on-demand inside the + integrand from this joint Halton via ``_rebuild_chain_at_period``. 
+ The likelihood conditions on individual data via re-evaluation of + previous-period state-factor measurements at each Halton draw:: + + L_i = sum_j w_j * sum_l pi_{l,i} + * [prod_m N(Z_{t-1,m,i} | c~_m + lam~_m' th_{t-1}_j, sd~_m)] + * [prod_m N(Z_{t,m,i} | c_m + lam_m' th_t_j, sd_m)] + + where ``th_{t-1}_j = chain_rebuild(joint_z_j)`` and + ``th_t_j = f(th_{t-1}_j; delta) + sd_shock * z_shock_curr_j``. + Tildes denote already-estimated parameters from previous steps. + + Args: + params: Full parameter vector in template order. Fixed entries are + held constant by optimagic `FixedConstraint`s attached outside. + n_state_factors: Number of state factors with transition equations. + n_endogenous_factors: Number of endogenous (investment) factors. + n_measures: Number of measurements at period t. + n_controls: Number of controls at period t. + measurements: Shape (n_obs, n_measures), measurements at period t. + controls: Shape (n_obs, n_controls), controls at period t. + loading_mask: Shape (n_measures, n_state_factors), loading mask. + prev_measurements: Shape (n_obs, n_prev_measures), measurements t-1. + prev_controls: Shape (n_obs, n_prev_controls), controls at t-1. + prev_loading_mask: Shape (n_prev_measures, n_factors), prev loadings. + prev_control_params: Shape (n_prev_measures, n_prev_controls), fixed. + prev_loadings_flat: Packed loadings from previous period, fixed. + prev_meas_sds: Shape (n_prev_measures,), fixed from previous step. + prev_distribution: Dict with keys "cond_weights", "means", "chol_covs". + joint_nodes: Shape (n_halton, n_state + n_shock + n_endogenous), + standard-normal Halton draws partitioned into state, production + shock, and investment shock components. `n_shock` equals + `n_shock_factors` (defaults to `n_state_factors`). + joint_weights: Shape (n_halton,) quadrature weights (uniform + 1/n_halton for Halton integration). + transition_func: Combined transition f(states, params) -> new_states. + total_n_transition_params: Total transition params across all factors. + total_n_inv_params: Total investment equation parameters. + n_inv_eq_params_per: Investment equation parameters per endogenous factor. + observed_factor_values: Shape (n_obs, n_obs_factors), observed factor data. + stability_floor: Numerical stability floor. + chain_links: Tuple of `ChainLink` objects, one per prior transition + step (length `period - 1` for the (period-1)→period step). + Empty for the 0→1 step. Carries each prior period's just-fitted + parameters so the chain replays from period 0 inside this + step's joint-Halton chain rebuild. + obs_factor_values_chain: Per-obs observed factor values at each + chain link's source period, shape `(n_obs, n_chain, + n_observed_factors)`. The current step's observed factors are + passed via `observed_factor_values`. + state_factor_indices_in_latent: Shape (n_state_factors,) int array + mapping each state factor to its column index in the + previous-period loading mask (which is in `latent_factors` order + = state + endogenous, possibly interleaved). Used to restrict + the prev-meas factor to state-factor loadings, mirroring + MATLAB's `create_nodes_weights_12` (which omits prev-period + inv measurements from the chained-sample importance weight). + Defaults to `arange(n_state_factors)` (assuming state factors + precede endogenous in the latent ordering). + n_shock_factors: Number of state factors that get a production shock. + Defaults to `n_state_factors`. 
Factors without a shock are + integrated deterministically (their shock dimension is dropped + from the joint Halton draw). + shock_factor_indices: Shape (n_shock_factors,) int array mapping each + shock slot to its position in the state-factor ordering. Required + when `n_shock_factors < n_state_factors`. + n_obs_per_batch: Observations per reverse-mode autodiff chunk. + ``None`` falls back to ``jax.vmap`` (single kernel); a positive + integer uses ``jax.lax.map`` so the backward-pass tape only + retains one chunk at a time. + + Return: + Scalar negative log-likelihood. + + """ + log_likes = af_per_obs_loglike_transition( + params, + n_state_factors=n_state_factors, + n_endogenous_factors=n_endogenous_factors, + n_measures=n_measures, + n_controls=n_controls, + measurements=measurements, + controls=controls, + loading_mask=loading_mask, + prev_measurements=prev_measurements, + prev_controls=prev_controls, + prev_loading_mask=prev_loading_mask, + prev_control_params=prev_control_params, + prev_loadings_flat=prev_loadings_flat, + prev_meas_sds=prev_meas_sds, + prev_distribution=prev_distribution, + chain_links=chain_links, + obs_factor_values_chain=obs_factor_values_chain, + joint_nodes=joint_nodes, + joint_weights=joint_weights, + transition_func=transition_func, + total_n_transition_params=total_n_transition_params, + total_n_inv_params=total_n_inv_params, + n_inv_eq_params_per=n_inv_eq_params_per, + observed_factor_values=observed_factor_values, + stability_floor=stability_floor, + state_factor_indices_in_latent=state_factor_indices_in_latent, + n_shock_factors=n_shock_factors, + shock_factor_indices=shock_factor_indices, + n_obs_per_batch=n_obs_per_batch, + ) + return -jnp.mean(log_likes) + + +def _parse_transition_params( + params: Array, + n_state_factors: int, + n_endogenous_factors: int, + n_measures: int, + n_controls: int, + total_n_transition_params: int, + total_n_inv_params: int, + _n_inv_eq_params_per: int, + *, + n_shock_factors: int | None = None, +) -> dict[str, Array]: + """Parse flat parameter vector for a transition period.""" + effective_n_shock = n_state_factors if n_shock_factors is None else n_shock_factors + idx = 0 + + # Transition parameters (flat, for state factors only) + transition_params = params[idx : idx + total_n_transition_params] + idx += total_n_transition_params + + # Shock SDs per shock-bearing state factor (subset of state factors). 
+ shock_sds = params[idx : idx + effective_n_shock] + idx += effective_n_shock + + # Investment equation params (if any endogenous factors) + inv_eq_params = params[idx : idx + total_n_inv_params] + idx += total_n_inv_params + + # Investment shock SDs + inv_sds = params[idx : idx + n_endogenous_factors] + idx += n_endogenous_factors + + # Control params: (n_measures, n_controls) + n_ctrl = n_measures * n_controls + control_params = params[idx : idx + n_ctrl].reshape(n_measures, n_controls) + idx += n_ctrl + + # Packed loadings + n_loadings = int(params.shape[0]) - idx - n_measures + loadings_flat = params[idx : idx + n_loadings] + idx += n_loadings + + # Measurement SDs + meas_sds = params[idx : idx + n_measures] + + return { + "transition_params": transition_params, + "shock_sds": shock_sds, + "inv_eq_params": inv_eq_params, + "inv_sds": inv_sds, + "control_params": control_params, + "loadings_flat": loadings_flat, + "meas_sds": meas_sds, + } + + +def _transition_loglike_per_obs( + *, + transition_params: Array, + shock_sds: Array, + inv_eq_params: Array, + inv_sds: Array, + control_params: Array, + loadings_flat: Array, + meas_sds: Array, + measurements: Array, + controls: Array, + loading_mask: Array, + prev_residuals_base: Array, + prev_meas_mask: Array, + prev_full_loadings: Array, + prev_meas_sds: Array, + prev_distribution: dict[str, Array], + chain_links: tuple[ChainLink, ...], + obs_factor_values_chain: Array, + joint_nodes: Array, + joint_weights: Array, + transition_func: Callable, + n_state_factors: int, + n_endogenous_factors: int, + n_shock_factors: int, + shock_factor_indices: Array, + state_factor_indices_in_latent: Array, + observed_factor_values: Array, + stability_floor: float, + n_obs_per_batch: int | None = None, +) -> Array: + """Compute per-observation log-likelihood for a transition period. + + Uses the joint-Halton chain rebuild scheme: at every transition step, + a single joint Halton design covers (z_state, z_inv_chain, + z_shock_chain, z_inv_t, z_shock_t). The chained sample θ_0 → θ_{t-1} + is rebuilt on-demand inside the integrand from this single joint + Halton, mirroring MATLAB's ``create_nodes_weights_01/12``. + """ + n_measures, n_loading_factors = loading_mask.shape + full_loadings = jnp.zeros((n_measures, n_loading_factors)) + full_loadings = full_loadings.at[loading_mask].set(loadings_flat) + + # NaN-safety for current-period measurements (see `_initial_loglike_per_obs`). + meas_mask = jnp.isfinite(measurements) + safe_measurements = jnp.where(meas_mask, measurements, 0.0) + + control_contrib = controls @ control_params.T + residuals_base = safe_measurements - control_contrib + + cond_weights = prev_distribution["cond_weights"] + cond_means = prev_distribution["cond_means"] + cond_chols = prev_distribution["cond_chols"] + # cond_means shape (n_components, n_obs, n_state). Re-shape to + # (n_obs, n_components, n_state) so we can map per-obs. 
+ cond_means_by_obs = jnp.transpose(cond_means, (1, 0, 2)) + + @jax.checkpoint + def _single_obs( + residual_base: Array, + prev_residual_base: Array, + obs_cond_weights: Array, + obs_factor_values: Array, + obs_cond_means: Array, + obs_factor_values_chain_i: Array, + meas_mask_i: Array, + prev_meas_mask_i: Array, + ) -> Array: + return _integrate_transition_single_obs( + residual_base=residual_base, + meas_mask=meas_mask_i, + full_loadings=full_loadings, + meas_sds=meas_sds, + prev_residual_base=prev_residual_base, + prev_meas_mask=prev_meas_mask_i, + prev_full_loadings=prev_full_loadings, + prev_meas_sds=prev_meas_sds, + obs_cond_weights=obs_cond_weights, + obs_cond_means=obs_cond_means, + cond_chols=cond_chols, + chain_links=chain_links, + obs_factor_values_chain=obs_factor_values_chain_i, + joint_nodes=joint_nodes, + joint_weights=joint_weights, + transition_func=transition_func, + transition_params=transition_params, + shock_sds=shock_sds, + inv_eq_params=inv_eq_params, + inv_sds=inv_sds, + n_state_factors=n_state_factors, + n_endogenous_factors=n_endogenous_factors, + n_shock_factors=n_shock_factors, + shock_factor_indices=shock_factor_indices, + state_factor_indices_in_latent=state_factor_indices_in_latent, + obs_factor_values=obs_factor_values, + stability_floor=stability_floor, + ) + + return _map_over_obs( + _single_obs, + residuals_base, + prev_residuals_base, + cond_weights, + observed_factor_values, + cond_means_by_obs, + obs_factor_values_chain, + meas_mask, + prev_meas_mask, + n_obs_per_batch=n_obs_per_batch, + ) + + +def _compute_investment( + theta_prev: Array, + obs_factor_values: Array, + inv_eq_params: Array, + inv_sds: Array, + eps_i: Array, + n_endogenous_factors: int, + n_state_factors: int, +) -> Array: + """Compute investment from the AF investment equation. + + I_j = beta_0 + beta_k @ theta + beta_y @ Y + sigma_I * eps_I + + """ + n_obs_factors = obs_factor_values.shape[0] + n_per = 1 + n_state_factors + n_obs_factors + result = jnp.zeros(n_endogenous_factors) + for j in range(n_endogenous_factors): + beta = inv_eq_params[j * n_per : (j + 1) * n_per] + intercept = beta[0] + state_coeffs = beta[1 : 1 + n_state_factors] + obs_coeffs = beta[1 + n_state_factors :] + inv_j = ( + intercept + + jnp.dot(state_coeffs, theta_prev) + + jnp.dot(obs_coeffs, obs_factor_values) + + inv_sds[j] * eps_i[j] + ) + result = result.at[j].set(inv_j) + return result + + +def _rebuild_chain_at_period( + *, + z_state: Array, + z_inv_per_step: Array, + z_shock_per_step: Array, + initial_mean: Array, + initial_chol: Array, + chain_links: tuple[ChainLink, ...], + obs_factor_values_at_obs_per_step: Array, + n_state_factors: int, + n_endogenous_factors: int, +) -> Array: + """Forward-iterate θ_0 → θ_{t-1} from one joint-Halton draw. + + Mirrors MATLAB's `create_nodes_weights_12`: rebuild the chained sample + on-demand inside the transition likelihood from a single joint Halton + draw, so the (z_state, z_inv_per_step, z_shock_per_step) triple is + quasi-uniformly distributed in joint space at each index `j` (rather + than paired across two independent Halton sequences as the previous + static `samples_per_component` carry-over did). + + Args: + z_state: Shape (n_state_factors,). Standard-normal sample driving + the period-0 latent state for one (j, i, l). + z_inv_per_step: Shape (n_chain, n_endogenous_factors). One row + per prior chain step (period 1 .. period t-1). Standard-normal + inv shocks. + z_shock_per_step: Shape (n_chain, n_shock_factors). 
Standard-normal + production shocks per prior chain step. + initial_mean: Shape (n_state_factors,). Schur-conditional mean of + the period-0 state for one (i, l). + initial_chol: Shape (n_state_factors, n_state_factors). Cholesky + of the period-0 conditional covariance, shared across i. + chain_links: Tuple of ChainLink objects, one per prior transition + step (period 1 → period 2 → ...). Length n_chain. + obs_factor_values_at_obs_per_step: Shape (n_chain, n_obs_factors). + Observed factor values at the *source* period of each chain + step (i.e. period 0 for the first link, period 1 for the + second, etc.) for one observation. + n_state_factors: Number of state factors. + n_endogenous_factors: Number of endogenous factors (investment). + + Return: + theta at period t-1 (= start period of the current likelihood + step), shape (n_state_factors,). When `chain_links` is empty, + returns the period-0 state directly. + """ + theta = initial_mean + initial_chol @ z_state + for step_idx, link in enumerate(chain_links): + z_inv = z_inv_per_step[step_idx] + z_shock = z_shock_per_step[step_idx] + obs_y = obs_factor_values_at_obs_per_step[step_idx] + inv = _compute_investment( + theta, + obs_y, + link.inv_eq_params, + link.inv_sds, + z_inv, + n_endogenous_factors, + n_state_factors, + ) + full_with_obs = jnp.concatenate([theta, inv, obs_y]) + state_shock_contrib = ( + jnp.zeros(n_state_factors) + .at[link.shock_factor_indices] + .set(link.shock_sds * z_shock) + ) + theta = ( + link.transition_func(full_with_obs, link.transition_params) + + state_shock_contrib + ) + return theta + + +def _integrate_transition_single_obs( + *, + residual_base: Array, + meas_mask: Array, + full_loadings: Array, + meas_sds: Array, + prev_residual_base: Array, + prev_meas_mask: Array, + prev_full_loadings: Array, + prev_meas_sds: Array, + obs_cond_weights: Array, + obs_cond_means: Array, + cond_chols: Array, + chain_links: tuple[ChainLink, ...], + obs_factor_values_chain: Array, + joint_nodes: Array, + joint_weights: Array, + transition_func: Callable, + transition_params: Array, + shock_sds: Array, + inv_eq_params: Array, + inv_sds: Array, + n_state_factors: int, + n_endogenous_factors: int, + n_shock_factors: int, + shock_factor_indices: Array, + state_factor_indices_in_latent: Array, + obs_factor_values: Array, + stability_floor: float, +) -> Array: + """Joint-Halton importance integration for one obs at a transition step. + + Rebuilds the chained sample theta_0 -> theta_{t-1} on-demand from a + single joint Halton design at every transition step (matching MATLAB's + ``create_nodes_weights_01/12``). At index j, the joint Halton draw + couples (z_state, z_inv_chain, z_shock_chain, z_inv_t, z_shock_t) in + a quasi-uniform 3D+ space, replacing the previous broken scheme that + paired a period-0-seeded chained-sample's z_state[j] with a + period-t-seeded shock z[j] across two independent Halton sequences at + the same index. The split scheme aliased into sigma_prod optimization + (see commit message and ``sigma-prod-collapse-2026-05-07.md``). + + The non-trivial inputs: + + * ``obs_cond_means``: per-component Schur-conditional means for this + obs at period 0, shape ``(n_components, n_state_factors)``. + * ``cond_chols``: per-component Schur-conditional Cholesky factors at + period 0, shape ``(n_components, n_state_factors, n_state_factors)``. + Shared across observations. + * ``chain_links``: tuple of `ChainLink` objects, one per prior + transition step (length ``period - 1`` for the (period-1)->period + step). 
Empty for the 0->1 step. + * ``obs_factor_values_chain``: observed factor values at the source + period of each prior chain step for this observation, shape + ``(n_chain, n_obs_factors)``. The current step's observed factors + are passed via ``obs_factor_values``. + + The joint Halton design has dimension + ``n_state_factors + n_chain * (n_shock_factors + n_endogenous_factors) + + (n_shock_factors + n_endogenous_factors)``. Layout per draw j: + + * ``[:n_state_factors]``: z_state for theta_0 (shared across comps) + * for s in 0..n_chain-1: per-step ``z_shock`` followed by ``z_inv`` + * tail: current step's ``z_shock`` followed by ``z_inv``. + + The previous-period measurement density factor is restricted to + state-factor loadings (matches MATLAB's deliberate omission of + ``Z_inv_est_0`` from the chained-sample importance weight at + ``create_nodes_weights_12``). + """ + n_components = obs_cond_weights.shape[0] + n_chain = len(chain_links) + z_block = n_shock_factors + n_endogenous_factors + + def _log_draw_contribution(j_idx: Array) -> Array: + """Per-draw log kernel at Halton index j, LogSumExp over mixture comps.""" + z_at_j = joint_nodes[j_idx] + z_state = z_at_j[:n_state_factors] + # Chain shocks at indices [n_state, n_state + n_chain*z_block). + chain_block_start = n_state_factors + chain_block_end = chain_block_start + n_chain * z_block + if n_chain > 0: + z_chain = z_at_j[chain_block_start:chain_block_end].reshape( + n_chain, z_block + ) + z_shock_chain = z_chain[:, :n_shock_factors] + z_inv_chain = z_chain[:, n_shock_factors:] + else: + z_shock_chain = jnp.zeros((0, n_shock_factors)) + z_inv_chain = jnp.zeros((0, n_endogenous_factors)) + # Current step shocks at the tail. + z_shock_curr = z_at_j[chain_block_end : chain_block_end + n_shock_factors] + z_inv_shock = z_at_j[chain_block_end + n_shock_factors :] + + log_component_vals = [] + for l_idx in range(n_components): + # Rebuild θ_{t-1} from the joint Halton. + theta_prev = _rebuild_chain_at_period( + z_state=z_state, + z_inv_per_step=z_inv_chain, + z_shock_per_step=z_shock_chain, + initial_mean=obs_cond_means[l_idx], + initial_chol=cond_chols[l_idx], + chain_links=chain_links, + obs_factor_values_at_obs_per_step=obs_factor_values_chain, + n_state_factors=n_state_factors, + n_endogenous_factors=n_endogenous_factors, + ) + inv = _compute_investment( + theta_prev, + obs_factor_values, + inv_eq_params, + inv_sds, + z_inv_shock, + n_endogenous_factors, + n_state_factors, + ) + full_prev_with_obs = jnp.concatenate([theta_prev, inv, obs_factor_values]) + + # Previous-period measurement density: state-factor (skill) + # measurements at theta_prev only. Endogenous-factor (inv) + # measurements at t-1 are NOT re-evaluated here -- they were + # already used as current-period measurements at the (t-2)->(t-1) + # step (matches MATLAB's likelihood_12, which omits Z_inv_est_0 + # from the chained-sample importance weight). For rows that load + # only on endogenous factors, the slice picks zero loadings and + # the residual reduces to the centered measurement, contributing + # a per-obs constant that is invariant under the parameters. + prev_state_loadings = prev_full_loadings[:, state_factor_indices_in_latent] + prev_residuals = prev_residual_base - prev_state_loadings @ theta_prev + prev_log_pdf = _log_normal_pdf( + prev_residuals, + jnp.zeros_like(prev_residuals), + prev_meas_sds, + ) + log_prev_inv_meas = jnp.sum(jnp.where(prev_meas_mask, prev_log_pdf, 0.0)) + + # Current-period measurement density. 
Shocks only apply to + # factors with has_production_shock=True; scatter them into the + # state-factor ordering and leave deterministic factors as is. + state_shock_contrib = ( + jnp.zeros(n_state_factors) + .at[shock_factor_indices] + .set(shock_sds * z_shock_curr) + ) + theta_t = ( + transition_func(full_prev_with_obs, transition_params) + + state_shock_contrib + ) + all_factors_t = jnp.concatenate([theta_t, inv]) + residuals = residual_base - full_loadings @ all_factors_t + log_pdf = _log_normal_pdf(residuals, jnp.zeros_like(residuals), meas_sds) + log_meas = jnp.sum(jnp.where(meas_mask, log_pdf, 0.0)) + + log_kernel = ( + jnp.log(obs_cond_weights[l_idx] + stability_floor) + + log_prev_inv_meas + + log_meas + ) + log_component_vals.append(log_kernel) + + return jax.scipy.special.logsumexp(jnp.array(log_component_vals)) + + n_halton = joint_nodes.shape[0] + log_contribs = jax.vmap(_log_draw_contribution)(jnp.arange(n_halton)) + return jax.scipy.special.logsumexp(log_contribs + jnp.log(joint_weights)) + + +def _log_normal_pdf(x: Array, mean: Array, sd: Array) -> Array: + """Log of normal PDF, element-wise.""" + return -0.5 * jnp.log(2 * jnp.pi) - jnp.log(sd) - 0.5 * ((x - mean) / sd) ** 2 + + +def create_loglike_and_gradient( + loglike_fn: Callable, + **kwargs: Any, # noqa: ANN401 +) -> Callable: + """Create a jitted function returning (loglike, gradient). + + Args: + loglike_fn: The negative log-likelihood function. + **kwargs: Keyword arguments to partially apply (data, nodes, etc.). + + Return: + Function mapping free_params -> (neg_loglike, gradient). + + """ + partial_fn = functools.partial(loglike_fn, **kwargs) + value_and_grad_fn = jax.value_and_grad(partial_fn) + return jax.jit(value_and_grad_fn) diff --git a/src/skillmodels/af/params.py b/src/skillmodels/af/params.py new file mode 100644 index 00000000..45a00a08 --- /dev/null +++ b/src/skillmodels/af/params.py @@ -0,0 +1,477 @@ +"""Parameter index construction and parsing for AF estimation.""" + +from types import MappingProxyType +from typing import Any + +import numpy as np +import optimagic as om +import pandas as pd + +from skillmodels.common.constraints import FixedConstraintWithValue +from skillmodels.common.types import Normalizations, TransitionInfo + + +def get_initial_period_params_index( + *, + n_mixture_components: int, + latent_factors: tuple[str, ...], + measurements_period_0: dict[str, tuple[str, ...]], + controls: tuple[str, ...], + observed_factors: tuple[str, ...] = (), + reconstructed_factors: tuple[str, ...] = (), +) -> pd.MultiIndex: + """Build parameter index for the initial period (Step 0). + + Parameters estimated in Step 0: + - Mixture weights, means, Cholesky covariances for the joint distribution + of the *state* latent factors (those with + ``has_initial_distribution=True``) and observed factors at period 0. + - Investment equation parameters (one block per ``reconstructed_factor``) + and an investment shock SD per reconstructed factor. These pin the + period-0 value of each reconstructed factor as a deterministic + function of the state latents plus a shock. + - Measurement loadings, intercepts, SDs for period 0. + + When ``observed_factors`` is non-empty, the initial distribution is + modelled over the joint vector (state_latent, observed). Per-individual + observed values let the likelihood condition on them via the Schur + complement, which concentrates Halton draws and improves estimation + precision. + + Args: + n_mixture_components: Number of Gaussian mixture components. 
+ latent_factors: Names of *all* latent factors (including reconstructed + ones). Used for loading entries in the measurement block so + reconstructed factors can still load on period-0 measurements. + measurements_period_0: Factor name -> tuple of measurement variable names. + controls: Control variable names (includes "constant"). + observed_factors: Names of observed factors included in the joint + initial distribution. + reconstructed_factors: Latent factors with + ``has_initial_distribution=False``. These are excluded from the + mixture and receive their own investment-equation block at + period 0 instead. + + Return: + MultiIndex with levels (category, period, name1, name2). + + """ + ind_tups: list[tuple[str, int, str, str]] = [] + state_latent_factors = tuple( + f for f in latent_factors if f not in reconstructed_factors + ) + joint_factors = (*state_latent_factors, *observed_factors) + + # Measurements for the initial step exclude those that only load on + # reconstructed factors; their period-0 measurement params are + # estimated in the transition step 0->1 instead (matching MATLAB's + # transition_01 block convention). + measurements_period_0_filtered = { + f: m for f, m in measurements_period_0.items() if f in state_latent_factors + } + + # Mixture weights + for m in range(n_mixture_components): + ind_tups.append(("mixture_weights", 0, f"mixture_{m}", "-")) + + # Initial means per component per joint factor (state latent + observed) + for m in range(n_mixture_components): + for factor in joint_factors: + ind_tups.append(("initial_states", 0, f"mixture_{m}", factor)) + + # Initial Cholesky covariances per component (lower triangular) over joint factors + for m in range(n_mixture_components): + for row, f1 in enumerate(joint_factors): + for col, f2 in enumerate(joint_factors): + if col <= row: + ind_tups.append( + ( + "initial_cholcovs", + 0, + f"mixture_{m}", + f"{f1}-{f2}", + ) + ) + + # Measurement params for period 0 over state-latent factors only. + # Reconstructed factors' period-0 measurement params live in the + # transition step 0->1 params index. + ind_tups.extend( + _measurement_index_tuples( + period=0, + latent_factors=state_latent_factors, + measurements=measurements_period_0_filtered, + controls=controls, + ) + ) + + return pd.MultiIndex.from_tuples( + ind_tups, + names=["category", "period", "name1", "name2"], + ) + + +def get_transition_period_params_index( + *, + period: int, + latent_factors: tuple[str, ...], + transition_info: TransitionInfo, + measurements_at_period: dict[str, tuple[str, ...]], + controls: tuple[str, ...], + endogenous_factors: tuple[str, ...] = (), + observed_factors: tuple[str, ...] = (), + shock_factors: tuple[str, ...] | None = None, +) -> pd.MultiIndex: + """Build parameter index for a transition period (Step t, t >= 1). + + Parameters estimated in Step t: + - Transition parameters and shock SDs for period t-1 -> t + - Measurement loadings, intercepts, SDs for period t + - Investment equation params for each endogenous factor (if any) + + Args: + period: Calendar period (t >= 1). + latent_factors: Names of latent (non-endogenous) state factors. + transition_info: Transition function info from ProcessedModel. + measurements_at_period: Factor name -> measurement variables at period t. + controls: Control variable names. + endogenous_factors: Names of endogenous (investment) factors. + observed_factors: Names of observed factors. + shock_factors: Subset of `latent_factors` for which a production shock + SD is estimated. 
Factors omitted here get no shock SD parameter + and are integrated deterministically (dropping their shock + dimension from the Halton draw). Defaults to `latent_factors`. + + Return: + MultiIndex with levels (category, period, name1, name2). + + """ + if shock_factors is None: + shock_factors = latent_factors + ind_tups: list[tuple[str, int, str, str]] = [] + + # Transition parameters (for t-1 -> t) + for factor in latent_factors: + if factor in transition_info.param_names: + for name in transition_info.param_names[factor]: + ind_tups.append(("transition", period - 1, factor, name)) + + # Shock SDs (for t-1 -> t): only factors that have a production shock + for factor in shock_factors: + ind_tups.append(("shock_sds", period - 1, factor, "-")) + + # Investment equation parameters (for t-1) + for endog_factor in endogenous_factors: + # Intercept + ind_tups.append(("investment_eq", period - 1, endog_factor, "constant")) + # Coefficients on each state factor + for factor in latent_factors: + ind_tups.append(("investment_eq", period - 1, endog_factor, factor)) + # Coefficients on observed factors + for obs_factor in observed_factors: + ind_tups.append(("investment_eq", period - 1, endog_factor, obs_factor)) + # Investment shock SD + ind_tups.append(("investment_sds", period - 1, endog_factor, "-")) + + # Measurement params for period t (loadings for ALL factors, not just state) + all_factor_measurements = dict(measurements_at_period) + all_latent = (*latent_factors, *endogenous_factors) + ind_tups.extend( + _measurement_index_tuples( + period=period, + latent_factors=all_latent, + measurements=all_factor_measurements, + controls=controls, + ) + ) + + return pd.MultiIndex.from_tuples( + ind_tups, + names=["category", "period", "name1", "name2"], + ) + + +def _measurement_index_tuples( + *, + period: int, + latent_factors: tuple[str, ...], + measurements: dict[str, tuple[str, ...]], + controls: tuple[str, ...], +) -> list[tuple[str, int, str, str]]: + """Generate index tuples for measurement system parameters. + + Includes controls (intercept/control coefficients), loadings, and + measurement error SDs for all measurements in the given period. + + """ + ind_tups: list[tuple[str, int, str, str]] = [] + + # Collect all unique measurement variables for this period, preserving order + all_measures: list[str] = [] + measure_to_factors: dict[str, list[str]] = {} + for factor, measures in measurements.items(): + for m in measures: + if m not in measure_to_factors: + all_measures.append(m) + measure_to_factors[m] = [] + measure_to_factors[m].append(factor) + + # Controls (intercept + control variables) per measurement + for meas in all_measures: + for ctrl in controls: + ind_tups.append(("controls", period, meas, ctrl)) + + # Loadings: one per (measurement, factor) pair + for meas in all_measures: + for factor in latent_factors: + if factor in measure_to_factors.get(meas, []): + ind_tups.append(("loadings", period, meas, factor)) + + # Measurement error SDs + for meas in all_measures: + ind_tups.append(("meas_sds", period, meas, "-")) + + return ind_tups + + +def get_measurements_per_factor( + factors: MappingProxyType[str, Any], + period: int, +) -> dict[str, tuple[str, ...]]: + """Extract measurement variable names per factor for a given period. + + Args: + factors: ModelSpec.factors mapping. + period: Calendar period index. + + Return: + Dict mapping factor name to tuple of measurement variable names. 
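+        For example (hypothetical names): a spec where only "fac1" has
+        measurements ("y1", "y2") in the requested period yields
+        {"fac1": ("y1", "y2")}; factors with no measurements in that
+        period are omitted from the dict entirely.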
+ + """ + result: dict[str, tuple[str, ...]] = {} + for name, spec in factors.items(): + if period < len(spec.measurements) and len(spec.measurements[period]) > 0: + result[name] = spec.measurements[period] + return result + + +def get_normalizations_for_period( + factors: MappingProxyType[str, Any], + period: int, +) -> dict[str, dict[tuple[str, str], float]]: + """Extract normalization constraints for a given period. + + Return: + Dict of category ("loadings" or "intercepts") to dict of + (measurement, factor_or_control) -> fixed value. + + """ + loading_fixes: dict[tuple[str, str], float] = {} + intercept_fixes: dict[tuple[str, str], float] = {} + + for factor_name, spec in factors.items(): + norms: Normalizations | None = spec.normalizations + if norms is None: + continue + + if norms.loadings is not None and period < len(norms.loadings): + for meas, value in norms.loadings[period].items(): + loading_fixes[(meas, factor_name)] = value + + if norms.intercepts is not None and period < len(norms.intercepts): + for meas, value in norms.intercepts[period].items(): + # intercept normalizations fix the constant control + intercept_fixes[(meas, "constant")] = value + + return {"loadings": loading_fixes, "intercepts": intercept_fixes} + + +def create_af_params_template( + params_index: pd.MultiIndex, + normalizations: dict[str, dict[tuple[str, str], float]], + period: int, + *, + bounds_distance: float = 0.001, +) -> pd.DataFrame: + """Create parameter template DataFrame with bounds and fixed values. + + Args: + params_index: Parameter MultiIndex for this period. + normalizations: Loading and intercept normalizations. + period: Calendar period. + bounds_distance: Minimum distance from zero for SD parameters. + + Return: + DataFrame with columns: value, lower_bound, upper_bound. + + """ + params = pd.DataFrame( + index=params_index, + data={ + "value": np.nan, + "lower_bound": -np.inf, + "upper_bound": np.inf, + }, + ) + + # Set bounds for SD parameters + sd_categories = ("meas_sds", "shock_sds", "investment_sds") + for cat in sd_categories: + mask = params.index.get_level_values("category") == cat + params.loc[mask, "lower_bound"] = bounds_distance + params.loc[mask, "value"] = 0.5 + + # Set bounds for mixture weights + weight_mask = params.index.get_level_values("category") == "mixture_weights" + params.loc[weight_mask, "lower_bound"] = 0.001 + params.loc[weight_mask, "upper_bound"] = 0.999 + + # Bound the log_ces substitution parameter phi from above. Without + # an upper bound the optimizer can drift phi to large positive + # values where exp(states * phi) overflows and the gradient turns + # to NaN. The lower side is well-behaved (phi -> -inf collapses to + # a finite minimum via logsumexp), so leave it unbounded to match + # MATLAB's (-inf, 1 - c) convention. 
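+    # With the default bounds_distance of 0.001 this caps every transition
+    # parameter named "phi" at 0.999; the lower bound stays at -inf.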
+ phi_mask = (params.index.get_level_values("category") == "transition") & ( + params.index.get_level_values("name2") == "phi" + ) + params.loc[phi_mask, "upper_bound"] = 1.0 - bounds_distance + + # Set bounds for Cholesky diagonals (must be positive) + chol_mask = params.index.get_level_values("category") == "initial_cholcovs" + for idx in params.index[chol_mask]: + # Diagonal entries have matching factor names (e.g., "fac1-fac1") + pair = idx[3] # name2 level + parts = pair.split("-") + if len(parts) == 2 and parts[0] == parts[1]: + params.loc[idx, "lower_bound"] = bounds_distance + + # Apply normalization fixes + loading_fixes = normalizations.get("loadings", {}) + for (meas, factor), val in loading_fixes.items(): + loc = ("loadings", period, meas, factor) + if loc in params.index: + params.loc[loc, "value"] = val + params.loc[loc, "lower_bound"] = val + params.loc[loc, "upper_bound"] = val + + intercept_fixes = normalizations.get("intercepts", {}) + for (meas, ctrl), val in intercept_fixes.items(): + loc = ("controls", period, meas, ctrl) + if loc in params.index: + params.loc[loc, "value"] = val + params.loc[loc, "lower_bound"] = val + params.loc[loc, "upper_bound"] = val + + # Default values for parameters still NaN + still_nan = params["value"].isna() + params.loc[still_nan, "value"] = 0.5 + + return params + + +def apply_start_params( + params_template: pd.DataFrame, + start_params: pd.DataFrame, +) -> None: + """Override heuristic defaults with user-supplied starting values. + + Match on the 4-level MultiIndex. Only free (non-fixed) parameters whose + index appears in `start_params` are updated. Fixed parameters and + parameters not in `start_params` are left unchanged. Modifies + `params_template` in place. + """ + common = params_template.index.intersection(start_params.index) + if common.empty: + return + free = ( + params_template.loc[common, "lower_bound"] + != params_template.loc[common, "upper_bound"] + ) + to_update = common[free] + if not to_update.empty: + params_template.loc[to_update, "value"] = start_params.loc[to_update, "value"] + + +def apply_fixed_params( + params_template: pd.DataFrame, + fixed_params: pd.DataFrame, +) -> None: + """Set template values to match user-provided fixed values. + + Used to pin parameters that would otherwise be free -- e.g., identity + transitions and zero shock SDs for time-invariant latent factors. The + pinning itself is enforced through `FixedConstraintWithValue` objects + emitted by `build_optimagic_inputs`; this helper only aligns the + template's starting values with the fixes so early likelihood evaluations + use the correct values. Modifies `params_template` in place. + """ + common = params_template.index.intersection(fixed_params.index) + if common.empty: + return + params_template.loc[common, "value"] = fixed_params.loc[common, "value"] + + +def build_optimagic_inputs( + params_template: pd.DataFrame, + fixed_params: pd.DataFrame | None, +) -> tuple[pd.DataFrame, list[om.constraints.Constraint]]: + """Prepare the params DataFrame and fixed-constraint list for `om.minimize`. + + The AF template encodes normalization fixes by clamping + ``lower_bound == upper_bound`` on affected rows. User-provided + `fixed_params` add further pinned rows. Both are translated into + `FixedConstraintWithValue` objects so optimagic can treat them uniformly + -- in particular so fixes that overlap a `ProbabilityConstraint` selector + get folded correctly. 
The returned DataFrame has infinite bounds on every + row that is pinned by a constraint, since optimagic rejects finite bounds + on probability selectors. + + Args: + params_template: AF parameter template with value/lower_bound/upper_bound. + fixed_params: Optional user-provided fixes (DataFrame with a "value" + column and the same 4-level MultiIndex as the template). + + Return: + Tuple of (full_params_df, fixed_constraints) where full_params_df + carries the template values plus any user fixes on all rows, and + fixed_constraints is a list of `FixedConstraintWithValue` objects + covering every pinned row (normalisation and user fixes alike). + + """ + params = params_template.copy() + + if fixed_params is not None: + common = params.index.intersection(fixed_params.index) + if not common.empty: + params.loc[common, "value"] = fixed_params.loc[common, "value"] + + fixed_from_bounds = ( + params["lower_bound"].to_numpy() == params["upper_bound"].to_numpy() + ) + fixed_from_user: np.ndarray + if fixed_params is not None: + common = params.index.intersection(fixed_params.index) + fixed_from_user = np.asarray(params.index.isin(common)) + else: + fixed_from_user = np.zeros(len(params), dtype=bool) + + pinned = fixed_from_bounds | fixed_from_user + + constraints: list[om.constraints.Constraint] = [] + for idx in params.index[pinned]: + constraints.append( + FixedConstraintWithValue( + loc=idx, + value=float(params.loc[idx, "value"]), + ) + ) + + # Relax bounds on pinned rows: optimagic rejects finite bounds that + # overlap a probability selector, and the FixedConstraint now does the + # pinning. + pinned_idx = params.index[pinned] + params.loc[pinned_idx, "lower_bound"] = -np.inf + params.loc[pinned_idx, "upper_bound"] = np.inf + + return params, constraints diff --git a/src/skillmodels/af/posterior_states.py b/src/skillmodels/af/posterior_states.py new file mode 100644 index 00000000..b6595de3 --- /dev/null +++ b/src/skillmodels/af/posterior_states.py @@ -0,0 +1,259 @@ +"""Compute posterior state estimates from AF estimation results. + +For each individual and period, compute E[theta_t | Z_{0:t,i}] using +Halton quadrature and the estimated conditional distributions. +""" + +from typing import Any + +import jax +import jax.numpy as jnp +import numpy as np +import pandas as pd +from jax import Array + +from skillmodels.af.halton import create_halton_nodes_and_weights +from skillmodels.af.initial_period import _build_loading_mask, _get_ordered_measures +from skillmodels.af.likelihood import _log_normal_pdf +from skillmodels.af.params import get_measurements_per_factor +from skillmodels.af.types import AFEstimationResult, ConditionalDistribution +from skillmodels.chs.process_debug_data import create_state_ranges +from skillmodels.common.model_spec import ModelSpec + + +def get_af_posterior_states( + af_result: AFEstimationResult, + model_spec: ModelSpec, + data: pd.DataFrame, + n_halton_points: int = 100, +) -> dict[str, dict[str, Any]]: + """Compute posterior state means from AF estimation results. + + For each individual i and period t, compute:: + + E[theta_t | Z_t,i] = sum_q w_q theta_q p(Z_t,i | theta_q) + / sum_q w_q p(Z_t,i | theta_q) + + where theta_q are quadrature nodes from the estimated conditional + distribution at period t, and p(Z_t,i | theta_q) is the measurement + density. + + Args: + af_result: Result from `estimate_af()`. + model_spec: Model specification. + data: Dataset in long format with MultiIndex (id, period). 
+ n_halton_points: Quadrature points for posterior computation. + + Return: + Dict with "unanchored_states" containing "states" DataFrame + (columns: id, period, factor1, ...) and "state_ranges". + + """ + jax.config.update("jax_enable_x64", val=True) + + idx_names = data.index.names + id_col = str(idx_names[0]) + period_col = str(idx_names[1]) + + # Identify state factors from the conditional distribution dimension + n_state = af_result.conditional_distributions[0].components[0].mean.shape[0] + state_factors = tuple( + f for f in model_spec.factors if not model_spec.factors[f].is_endogenous + )[:n_state] + + rows: list[dict[str, float | int]] = [] + + for t, (period_result, cond_dist) in enumerate( + zip( + af_result.period_results, + af_result.conditional_distributions, + strict=True, + ) + ): + measurements_pt = get_measurements_per_factor(model_spec.factors, period=t) + if not measurements_pt: + continue + + meas_info = _extract_period_measurement_info( + period_result.params, + model_spec, + state_factors, + t, + ) + + period_mask = data.index.get_level_values(period_col) == t + period_df = data.loc[period_mask] + ids = period_df.index.get_level_values(id_col) + + all_measures = _get_ordered_measures(measurements_pt) + meas_cols = [c for c in all_measures if c in period_df.columns] + measurements = jnp.array( + period_df[meas_cols].to_numpy(dtype=np.float64, na_value=np.nan), + ) + + # Build per-observation control contribution + ctrl_arrays = [] + for ctrl in meas_info["control_names"]: + if ctrl == "constant": + ctrl_arrays.append(np.ones(len(period_df))) + elif ctrl in period_df.columns: + ctrl_arrays.append(period_df[ctrl].to_numpy(dtype=np.float64)) + else: + ctrl_arrays.append(np.zeros(len(period_df))) + controls = jnp.array(np.column_stack(ctrl_arrays)) + control_contrib = controls @ meas_info["control_params"].T + + nodes, weights = create_halton_nodes_and_weights(n_halton_points, n_state) + + posterior_means = _compute_posterior_means( + cond_dist=cond_dist, + measurements=measurements, + control_contrib=control_contrib, + full_loadings=meas_info["full_loadings"], + meas_sds=meas_info["meas_sds"], + nodes=nodes, + weights=weights, + ) + + for idx_i, obs_id in enumerate(ids): + row: dict[str, float | int] = {id_col: obs_id, "period": t} + for f_idx, factor in enumerate(state_factors): + row[factor] = float(posterior_means[idx_i, f_idx]) + rows.append(row) + + states_df = pd.DataFrame(rows) + state_ranges = create_state_ranges( + filtered_states=states_df, + factors=state_factors, + ) + + return { + "unanchored_states": { + "states": states_df, + "state_ranges": state_ranges, + }, + } + + +def _extract_period_measurement_info( + period_params: pd.DataFrame, + model_spec: ModelSpec, + factors: tuple[str, ...], + period: int, +) -> dict[str, Any]: + """Extract measurement loadings, control contribution, and SDs.""" + measurements_pt = get_measurements_per_factor(model_spec.factors, period=period) + all_measures = _get_ordered_measures(measurements_pt) + loading_mask = _build_loading_mask(all_measures, factors, measurements_pt) + + loadings_list = [] + for mi, meas in enumerate(all_measures): + for fi, factor in enumerate(factors): + if loading_mask[mi, fi]: + loc = ("loadings", period, meas, factor) + if loc in period_params.index: + loadings_list.append( + float(period_params.loc[loc, "value"]) # ty: ignore[invalid-argument-type] + ) + + full_loadings = jnp.zeros((len(all_measures), len(factors))) + full_loadings = full_loadings.at[jnp.array(loading_mask)].set( # noqa: PD008 + 
jnp.array(loadings_list) + ) + + # Extract ALL control coefficients (not just "constant") + ctrl_entries = period_params.loc[ + period_params.index.get_level_values("category") == "controls" + ] + ctrl_names = ( + sorted(set(ctrl_entries.index.get_level_values("name2"))) + if len(ctrl_entries) > 0 + else ["constant"] + ) + ctrl_params_list = [] + for meas in all_measures: + for ctrl in ctrl_names: + loc = ("controls", period, meas, ctrl) + if loc in period_params.index: + ctrl_params_list.append(float(period_params.loc[loc, "value"])) + else: + ctrl_params_list.append(0.0) + control_params = jnp.array(ctrl_params_list).reshape( + len(all_measures), len(ctrl_names) + ) + + sd_list = [ + float(period_params.loc[loc, "value"]) # ty: ignore[invalid-argument-type] + if (loc := ("meas_sds", period, meas, "-")) in period_params.index + else 0.5 + for meas in all_measures + ] + + return { + "full_loadings": full_loadings, + "control_params": control_params, + "control_names": ctrl_names, + "meas_sds": jnp.array(sd_list), + } + + +def _compute_posterior_means( + *, + cond_dist: ConditionalDistribution, + measurements: Array, + full_loadings: Array, + control_contrib: Array, + meas_sds: Array, + nodes: Array, + weights: Array, +) -> Array: + """Compute posterior means for all individuals at one period. + + Return shape (n_obs, n_factors). + """ + n_components = len(cond_dist.components) + means = jnp.stack([c.mean for c in cond_dist.components]) + chol_covs = jnp.stack([c.chol_cov for c in cond_dist.components]) + mix_weights = cond_dist.mixture_weights + + residuals_base = measurements - control_contrib + + def _single_obs(residual_base: Array) -> Array: + """Posterior mean for one individual.""" + + def _node_kernel(z_q: Array) -> tuple[Array, Array]: + """Return (log_weight, weighted_theta) for one quadrature node.""" + log_component_vals = [] + theta_components = [] + for l_idx in range(n_components): + theta = means[l_idx] + chol_covs[l_idx] @ z_q + residuals = residual_base - full_loadings @ theta + log_lik = jnp.sum( + _log_normal_pdf( + residuals, + jnp.zeros_like(residuals), + meas_sds, + ) + ) + log_component_vals.append( + jnp.log(mix_weights[l_idx] + 1e-300) + log_lik + ) + theta_components.append(theta) + + log_w = jax.scipy.special.logsumexp(jnp.array(log_component_vals)) + # Weighted theta across mixture components + comp_weights = jax.nn.softmax(jnp.array(log_component_vals)) + avg_theta = jnp.zeros_like(theta_components[0]) + for cw, tv in zip(comp_weights, theta_components, strict=True): + avg_theta = avg_theta + cw * tv + return log_w, avg_theta + + log_ws, thetas = jax.vmap(_node_kernel)(nodes) + + # Posterior weights: softmax of log_ws + log(quadrature_weights) + log_posterior = log_ws + jnp.log(weights) + posterior_weights = jax.nn.softmax(log_posterior) + + return jnp.sum(posterior_weights[:, None] * thetas, axis=0) + + return jax.vmap(_single_obs)(residuals_base) diff --git a/src/skillmodels/af/transition_period.py b/src/skillmodels/af/transition_period.py new file mode 100644 index 00000000..48f71c28 --- /dev/null +++ b/src/skillmodels/af/transition_period.py @@ -0,0 +1,1228 @@ +"""Step t (t >= 1) of the AF estimator: transition period estimation. + +Estimate transition function parameters and measurement system parameters +using Halton quadrature over the latent factor distribution from the +previous period. 
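+
+A typical driver loops period by period, feeding each step's returned
+conditional distribution into the next call (sketch only; the name of the
+initial-period entry point, ``estimate_initial_period``, is assumed here
+and not defined in this module, and ``step0_inputs``/``other_inputs``
+stand in for the remaining data, model, and options arguments)::
+
+    period_result, dist = estimate_initial_period(**step0_inputs)
+    for t in range(1, n_periods):
+        period_result, dist = estimate_transition_period(
+            period=t,
+            prev_distribution=dist,
+            prev_period_params=period_result.params,
+            **other_inputs,  # data, model_spec, af_options, ...
+        )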
+""" + +import inspect +from collections.abc import Callable, Mapping + +import jax +import jax.numpy as jnp +import numpy as np +import optimagic as om +import pandas as pd +from jax import Array + +from skillmodels.af.batching import auto_n_obs_per_batch +from skillmodels.af.halton import create_halton_nodes_and_weights +from skillmodels.af.initial_period import _build_loading_mask, _get_ordered_measures +from skillmodels.af.likelihood import af_loglike_transition, create_loglike_and_gradient +from skillmodels.af.params import ( + apply_fixed_params, + apply_start_params, + build_optimagic_inputs, + create_af_params_template, + get_measurements_per_factor, + get_normalizations_for_period, + get_transition_period_params_index, +) +from skillmodels.af.types import ( + AFEstimationOptions, + AFPeriodResult, + ChainLink, + ConditionalDistribution, + MixtureComponent, +) +from skillmodels.amn.moments import ( + SpearmanResult, + seed_beta_from_ols, + spearman_factor_moments, +) +from skillmodels.common.model_spec import ModelSpec +from skillmodels.common.types import ProcessedModel, TransitionInfo + + +def estimate_transition_period( + period: int, + model_spec: ModelSpec, + processed_model: ProcessedModel, + measurements: Array, + controls: Array, + prev_measurements: Array, + prev_controls: Array, + prev_period_params: pd.DataFrame, + prev_distribution: ConditionalDistribution, + af_options: AFEstimationOptions, + endogenous_factors: tuple[str, ...] = (), + observed_factors: tuple[str, ...] = (), + observed_factor_data: Array | None = None, + start_params: pd.DataFrame | None = None, + fixed_params: pd.DataFrame | None = None, +) -> tuple[AFPeriodResult, ConditionalDistribution]: + """Estimate a transition period (Step t, t >= 1) of the AF procedure. + + Given the estimated distribution of latent factors from previous periods, + estimate the transition function parameters and measurement system + parameters for the current period via MLE with Halton quadrature. + + Args: + period: Calendar period index (t >= 1). + model_spec: Model specification. + processed_model: Processed model from `process_model()`. + measurements: Shape (n_obs, n_measures), period t measurement values. + controls: Shape (n_obs, n_controls), period t control values. + prev_measurements: Shape (n_obs, n_prev_measures), period t-1 measurements. + prev_controls: Shape (n_obs, n_prev_controls), period t-1 controls. + prev_period_params: Estimated params DataFrame from period t-1. + prev_distribution: Estimated conditional distribution from period t-1. + af_options: AF estimation options. + endogenous_factors: Names of endogenous (investment) factors. + observed_factors: Names of observed (non-latent) factors. + observed_factor_data: Shape (n_obs, n_obs_factors), observed factor + values. Required when `observed_factors` is non-empty. + start_params: Optional starting values. Matching index entries + override heuristic defaults. + fixed_params: Optional DataFrame with a "value" column pinning + specified parameters (value + bounds both clamped to the value). + + Return: + Tuple of (AFPeriodResult, ConditionalDistribution) where the + distribution represents f(theta_t | data_{0:t}). 
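+        The returned distribution also carries the chain-link history
+        extended by this period's fitted link, which later transition
+        steps replay when rebuilding the chained sample.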
+ + """ + factors = processed_model.labels.latent_factors + controls_names = processed_model.labels.controls + + measurements_pt = get_measurements_per_factor(model_spec.factors, period=period) + all_measures = _get_ordered_measures(measurements_pt) + + # Get transition function info + # For now, use the first non-constant factor's transition for the combined function + transition_info = processed_model.transition_info + + # Separate state factors from endogenous for the parameter index + state_factors = tuple(f for f in factors if f not in endogenous_factors) + n_state = len(state_factors) + n_endog = len(endogenous_factors) + shock_factors = tuple( + f for f in state_factors if model_spec.factors[f].has_production_shock + ) + n_shock = len(shock_factors) + shock_factor_indices = jnp.array( + [state_factors.index(f) for f in shock_factors], dtype=jnp.int32 + ) + # Indices of the state factors within the full latent-factor ordering. + # `prev_full_loadings` has columns in `factors` order (state + + # endogenous, possibly interleaved); the prev-meas factor restricts to + # state-factor columns to mirror MATLAB's likelihood_12 (which omits + # period-(t-1) inv measurements from the chained-sample importance + # weight). Build the mapping explicitly rather than relying on + # state-before-endogenous ordering. + state_factor_indices_in_latent = jnp.array( + [factors.index(f) for f in state_factors], dtype=jnp.int32 + ) + + params_index = get_transition_period_params_index( + period=period, + latent_factors=state_factors, + transition_info=transition_info, + measurements_at_period=measurements_pt, + controls=controls_names, + endogenous_factors=endogenous_factors, + observed_factors=observed_factors, + shock_factors=shock_factors, + ) + normalizations = get_normalizations_for_period(model_spec.factors, period=period) + params_template = create_af_params_template( + params_index, + normalizations, + period=period, + ) + + params_template = _initialize_transition_params( + params_template, + measurements, + start_params, + fixed_params, + period=period, + model_spec=model_spec, + state_factors=state_factors, + endogenous_factors=endogenous_factors, + observed_factors=observed_factors, + observed_factor_data=observed_factor_data, + prev_measurements=prev_measurements, + af_options=af_options, + normalizations=normalizations, + ) + + # Collect transition function constraints (only for state factors' transitions) + transition_constraints = _collect_transition_constraints( + transition_info, + state_factors, + processed_model.labels.all_factors, + period, + ) + + _seed_probability_start_values( + params_template, transition_constraints, fixed_params + ) + + # Build loading mask + loading_mask = _build_loading_mask(all_measures, factors, measurements_pt) + + # JOINT Halton design covering ALL randomness needed at this step, + # mirroring MATLAB's `create_nodes_weights_01/12`. The chained sample + # θ_0 → θ_{period-1} is rebuilt on-demand inside the integrand from + # this single joint sequence (see `_rebuild_chain_at_period` in + # `af/likelihood.py` and the obsidian note + # `sigma-prod-collapse-2026-05-07.md` for why this matters). 
+ # + # Layout of joint_nodes[j]: + # [:n_state] -- z_state for θ_0 + # for s in 0..period-2: -- prior chain steps + # [n_state+s*zb : n_state+s*zb+n_shock] -- z_P at period s+1 + # [...n_shock+n_endog] -- z_inv at period s+1 + # [tail: n_shock] -- z_P at current step (period) + # [tail: n_endog] -- z_inv at current step (period) + # + # Seed the Halton design with the period index. Each step draws an + # independent low-discrepancy sequence; the joint structure within a + # step delivers proper quasi-uniform 3D+ coverage (vs. the previous + # split scheme which paired two independent sequences at the same j). + n_chain = period - 1 # number of prior transition steps already estimated + z_block = n_shock + n_endog + joint_dim = n_state + n_chain * z_block + z_block + joint_nodes, joint_weights = create_halton_nodes_and_weights( + af_options.n_halton_points, + joint_dim, + seed=period, + ) + + prev_dist_arrays, total_n_transition_params = _prepare_transition_inputs( + prev_distribution, + transition_info, + state_factors, + measurements.shape[0], + ) + + # Build combined transition from raw transition functions. + # Only state factors have transitions; endogenous factors use the investment eq. + raw_funcs = _get_raw_transition_functions( + model_spec, + state_factors, + all_factors=processed_model.labels.all_factors, + param_names=transition_info.param_names, + ) + param_counts = tuple(len(transition_info.param_names[f]) for f in state_factors) + + def combined_transition( + full_states: Array, + params: Array, + ) -> Array: + """Apply per-factor transitions.""" + result = jnp.zeros(n_state) + p_idx = 0 + for i in range(n_state): + n_p = param_counts[i] + factor_params = params[p_idx : p_idx + n_p] + result = result.at[i].set( # noqa: PD008 + raw_funcs[i](full_states, factor_params) + ) + p_idx += n_p + return result + + # Count investment equation params (per endogenous factor: intercept + state + obs) + n_inv_eq_params_per = 1 + n_state + len(observed_factors) if n_endog > 0 else 0 + total_n_inv_params = n_endog * n_inv_eq_params_per + + # Observed factor values for investment equation (from previous period) + n_obs_fac = len(observed_factors) + obs_factor_values = ( + observed_factor_data + if observed_factor_data is not None + else jnp.zeros((measurements.shape[0], n_obs_fac)) + ) + + # Carry forward chain links from prior transition steps for the + # joint-Halton chain rebuild. The period-0→1 step has chain_links == (). + chain_links = prev_distribution.chain_links + + # Per-obs observed factors at the source period of each chain link + # (period 0 for link 0, period 1 for link 1, ...). Stack across + # links into shape (n_obs, n_chain, n_obs_factors). Each ChainLink + # already carries its own period's `obs_factor_values` internally; + # extract them here in obs-major order to match the per-obs map in + # `_transition_loglike_per_obs`. 
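+    # Illustrative sizes (assumed, not from a real model): at period 3 with
+    # n_state=3, n_shock=2 and n_endog=1, n_chain=2 and z_block=3, so the
+    # joint Halton dimension above is 3 + 2*3 + 3 = 12; the stack built
+    # below is (n_obs, 2, n_obs_fac), degenerating to (n_obs, 0, n_obs_fac)
+    # at period 1 where there are no prior links.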
+ if len(chain_links) == 0: + obs_factor_values_chain = jnp.zeros((measurements.shape[0], 0, n_obs_fac)) + else: + obs_factor_values_chain = jnp.stack( + [link.obs_factor_values for link in chain_links], axis=1 + ) + + result_params, opt_res = _run_transition_optimization( + params_template=params_template, + prev_period_params=prev_period_params, + model_spec=model_spec, + factors=factors, + period=period, + n_state=n_state, + n_endog=n_endog, + n_shock=n_shock, + shock_factor_indices=shock_factor_indices, + state_factor_indices_in_latent=state_factor_indices_in_latent, + all_measures=all_measures, + controls_names=controls_names, + measurements=measurements, + controls=controls, + prev_measurements=prev_measurements, + prev_controls=prev_controls, + loading_mask=loading_mask, + prev_dist_arrays=prev_dist_arrays, + chain_links=chain_links, + obs_factor_values_chain=obs_factor_values_chain, + joint_nodes=joint_nodes, + joint_weights=joint_weights, + combined_transition=combined_transition, + total_n_transition_params=total_n_transition_params, + total_n_inv_params=total_n_inv_params, + n_inv_eq_params_per=n_inv_eq_params_per, + obs_factor_values=obs_factor_values, + af_options=af_options, + transition_constraints=transition_constraints, + fixed_params=fixed_params, + ) + + # Build the next ChainLink from the just-fitted period parameters and + # append it to the chain history. Future transition steps will replay + # this link as part of their joint-Halton chain rebuild. + new_link = _build_chain_link( + period=period, + result_params=result_params, + combined_transition=combined_transition, + shock_factor_indices=shock_factor_indices, + n_inv_eq_params_per=n_inv_eq_params_per, + obs_factor_values=obs_factor_values, + ) + new_chain_links = (*chain_links, new_link) + + # Build the importance-sample SUMMARY (mean, chol_cov per component) + # for posterior-state extraction. This path is no longer load-bearing + # for the transition likelihood (rebuilt on-demand from joint Halton), + # but `posterior_states.py` still consumes the per-component summary + # statistics derived from the chained sample. + updated_dist = _update_conditional_distribution( + prev_distribution=prev_distribution, + result_params=result_params, + combined_transition=combined_transition, + joint_nodes=joint_nodes, + n_state=n_state, + n_endog=n_endog, + n_shock=n_shock, + shock_factor_indices=shock_factor_indices, + observed_factor_values=obs_factor_values, + n_observed_factors=len(observed_factors), + ) + # Carry the accumulated chain history forward. 
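+    # The tuple grows by one ChainLink per estimated step: after period t it
+    # covers the transitions 0->1 through (t-1)->t, which is exactly what
+    # the period t+1 call needs to rebuild theta up to its start period.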
+ updated_dist = _replace_chain_links(updated_dist, new_chain_links) + + period_result = AFPeriodResult( + period=period, + params=result_params, + loglikelihood=-float(opt_res.fun), + success=bool(opt_res.success), + optimize_result=opt_res, + ) + + return period_result, updated_dist + + +def _run_transition_optimization( + *, + params_template: pd.DataFrame, + prev_period_params: pd.DataFrame, + model_spec: ModelSpec, + factors: tuple[str, ...], + period: int, + n_state: int, + n_endog: int, + n_shock: int, + shock_factor_indices: Array, + state_factor_indices_in_latent: Array, + all_measures: list[str], + controls_names: tuple[str, ...], + measurements: Array, + controls: Array, + prev_measurements: Array, + prev_controls: Array, + loading_mask: np.ndarray, + prev_dist_arrays: dict[str, Array], + chain_links: tuple[ChainLink, ...], + obs_factor_values_chain: Array, + joint_nodes: Array, + joint_weights: Array, + combined_transition: Callable, + total_n_transition_params: int, + total_n_inv_params: int, + n_inv_eq_params_per: int, + obs_factor_values: Array, + af_options: AFEstimationOptions, + transition_constraints: list[om.constraints.Constraint], + fixed_params: pd.DataFrame | None, +) -> tuple[pd.DataFrame, om.OptimizeResult]: + """Build likelihood, run the optimizer, and return updated params. + + Handle the mechanical optimization setup: construct the log-likelihood + keyword arguments, create the jitted value-and-gradient function, build + the params DataFrame + constraint list, and call `om.minimize`. + + Return: + Tuple of (result_params DataFrame, OptimizeResult). + + """ + full_params_df, fixed_constraints = build_optimagic_inputs( + params_template, fixed_params + ) + + prev_meas_info = _extract_prev_measurement_params( + prev_period_params, + model_spec, + factors, + period - 1, + ) + + n_obs_per_batch = af_options.n_obs_per_batch + if n_obs_per_batch is None: + n_obs_per_batch = auto_n_obs_per_batch( + n_obs=int(measurements.shape[0]), + n_halton_points=af_options.n_halton_points, + n_halton_points_shock=af_options.n_halton_points_shock, + n_latent=n_state, + n_endogenous=n_endog, + ) + + loglike_kwargs = { + "n_state_factors": n_state, + "n_endogenous_factors": n_endog, + "n_shock_factors": n_shock, + "shock_factor_indices": shock_factor_indices, + "state_factor_indices_in_latent": state_factor_indices_in_latent, + "n_measures": len(all_measures), + "n_controls": len(controls_names), + "measurements": measurements, + "controls": controls, + "loading_mask": jnp.array(loading_mask), + "prev_measurements": prev_measurements, + "prev_controls": prev_controls, + "prev_loading_mask": prev_meas_info["loading_mask"], + "prev_control_params": prev_meas_info["control_params"], + "prev_loadings_flat": prev_meas_info["loadings_flat"], + "prev_meas_sds": prev_meas_info["meas_sds"], + "prev_distribution": prev_dist_arrays, + "chain_links": chain_links, + "obs_factor_values_chain": obs_factor_values_chain, + "joint_nodes": joint_nodes, + "joint_weights": joint_weights, + "transition_func": combined_transition, + "total_n_transition_params": total_n_transition_params, + "total_n_inv_params": total_n_inv_params, + "n_inv_eq_params_per": n_inv_eq_params_per, + "observed_factor_values": obs_factor_values, + "stability_floor": af_options.stability_floor, + "n_obs_per_batch": n_obs_per_batch, + } + + loglike_and_grad = create_loglike_and_gradient( + af_loglike_transition, + **loglike_kwargs, + ) + + def fun(params_df: pd.DataFrame) -> float: + val, _grad = 
loglike_and_grad(jnp.array(params_df["value"].to_numpy())) + return float(val) + + def fun_and_jac(params_df: pd.DataFrame) -> tuple[float, np.ndarray]: + val, grad = loglike_and_grad(jnp.array(params_df["value"].to_numpy())) + return float(val), np.array(grad) + + combined_constraints = list(transition_constraints) + list(fixed_constraints) + + opt_res = om.minimize( + fun=fun, + params=full_params_df[["value"]], + algorithm=af_options.optimizer_algorithm, + bounds=om.Bounds( + lower=full_params_df["lower_bound"], + upper=full_params_df["upper_bound"], + ), + constraints=combined_constraints or None, + fun_and_jac=fun_and_jac, + **dict(af_options.optimizer_options), + ) + + result_params = params_template.copy() + result_params["value"] = opt_res.params["value"].to_numpy() + + return result_params, opt_res + + +def _collect_transition_constraints( + transition_info: TransitionInfo, + factors: tuple[str, ...], + all_factors: tuple[str, ...], + period: int, +) -> list[om.constraints.Constraint]: + """Collect transition function constraints for the AF optimizer. + + Look for `constraints_{function_name}()` in `transition_functions.py`, + mirroring how CHS collects them in `constraints.py`. + """ + import skillmodels.common.transition_functions as tf_mod # noqa: PLC0415 + + constraints: list[om.constraints.Constraint] = [] + for factor in factors: + if factor not in transition_info.function_names: + continue + fname = transition_info.function_names[factor] + constraint_fn = getattr(tf_mod, f"constraints_{fname}", None) + if constraint_fn is not None: + constraints.append( + constraint_fn( + factor=factor, + factors=all_factors, + aug_period=period - 1, + ) + ) + return constraints + + +def _extract_prev_measurement_params( + prev_params: pd.DataFrame, + model_spec: ModelSpec, + factors: tuple[str, ...], + prev_period: int, +) -> dict[str, Array]: + """Extract estimated measurement params from the previous period. + + These are used as fixed (known) values when conditioning the transition + likelihood on individual-specific previous-period data. 
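+
+    Return:
+        Dict with keys "loading_mask" (bool array, n_prev_measures x
+        n_latent_factors), "loadings_flat" (packed loadings in mask order),
+        "control_params" (n_prev_measures x n_controls), and "meas_sds"
+        (one SD per previous-period measurement).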
+ """ + measurements_prev = get_measurements_per_factor( + model_spec.factors, period=prev_period + ) + all_prev_measures = _get_ordered_measures(measurements_prev) + loading_mask = _build_loading_mask(all_prev_measures, factors, measurements_prev) + + # Extract loadings (packed, in order of the mask) + loadings_list = [] + for mi, meas in enumerate(all_prev_measures): + for fi, factor in enumerate(factors): + if loading_mask[mi, fi]: + loc = ("loadings", prev_period, meas, factor) + if loc in prev_params.index: + loadings_list.append( + float(prev_params.loc[loc, "value"]) # ty: ignore[invalid-argument-type] + ) + + # Extract control params + ctrl_entries = prev_params.loc[ + prev_params.index.get_level_values("category") == "controls" + ] + ctrl_names = ( + sorted(set(ctrl_entries.index.get_level_values("name2"))) + if len(ctrl_entries) > 0 + else ["constant"] + ) + ctrl_params_list = _collect_ctrl_params( + prev_params, + all_prev_measures, + ctrl_names, + prev_period, + ) + control_params = jnp.array(ctrl_params_list).reshape( + len(all_prev_measures), len(ctrl_names) + ) + + # Extract measurement SDs + meas_sds_list = [] + for meas in all_prev_measures: + loc = ("meas_sds", prev_period, meas, "-") + if loc in prev_params.index: + meas_sds_list.append( + float(prev_params.loc[loc, "value"]) # ty: ignore[invalid-argument-type] + ) + + return { + "loading_mask": jnp.array(loading_mask), + "loadings_flat": jnp.array(loadings_list), + "control_params": control_params, + "meas_sds": jnp.array(meas_sds_list), + } + + +def _collect_ctrl_params( + prev_params: pd.DataFrame, + measures: list[str], + ctrl_names: list[str], + prev_period: int, +) -> list[float]: + """Collect control parameter values from the previous period's estimate.""" + result = [] + for meas in measures: + for ctrl in ctrl_names: + loc = ("controls", prev_period, meas, ctrl) + if loc in prev_params.index: + result.append( + float(prev_params.loc[loc, "value"]) # ty: ignore[invalid-argument-type] + ) + else: + result.append(0.0) + return result + + +def _get_raw_transition_functions( + model_spec: ModelSpec, + factors: tuple[str, ...], + *, + all_factors: tuple[str, ...], + param_names: Mapping[str, tuple[str, ...]], +) -> tuple[Callable, ...]: + """Get the raw (non-vmapped) transition functions for each factor. + + Returns callables with a uniform `(states, params_array) -> scalar` + signature for use inside JIT-compiled code. Built-in transitions + from `transition_functions.py` already match that signature; + `@register_params`-decorated user functions take individual factor + arguments plus a `params` dict, so they are wrapped here to convert + from AF's packed representation. + """ + import skillmodels.common.transition_functions as tf_mod # noqa: PLC0415 + + funcs: list[Callable] = [] + for factor in factors: + spec = model_spec.factors[factor] + tf = spec.transition_function + if isinstance(tf, str): + funcs.append(getattr(tf_mod, tf)) + elif callable(tf): + if hasattr(tf, "__registered_params__"): + funcs.append( + _wrap_registered_transition_function( + tf, + all_factors=all_factors, + param_names=tuple(param_names[factor]), + ) + ) + else: + funcs.append(tf) + else: + msg = f"Factor '{factor}': no transition function specified." + raise TypeError(msg) + return tuple(funcs) + + +def _wrap_registered_transition_function( + user_func: Callable, + *, + all_factors: tuple[str, ...], + param_names: tuple[str, ...], +) -> Callable: + """Bridge `@register_params` user functions to AF's `(states, params)` convention. 
+ + A user-defined transition function takes one positional argument + per factor it consumes (matching factor names in `all_factors`) + plus a final `params` dict keyed by `__registered_params__`. AF's + `combined_transition`, in contrast, supplies a packed state vector + and a flat parameter slice. This wrapper looks up each consumed + factor's position in `all_factors`, slices `states` accordingly, + rebuilds the `params` dict, and forwards the call. + """ + sig = inspect.signature(user_func) + arg_names = [name for name in sig.parameters if name != "params"] + arg_positions = tuple(all_factors.index(name) for name in arg_names) + + def wrapped(states: Array, factor_params: Array) -> Array: + kwargs: dict[str, Array | dict[str, Array]] = { + name: states[pos] + for name, pos in zip(arg_names, arg_positions, strict=True) + } + kwargs["params"] = dict(zip(param_names, factor_params, strict=True)) + return user_func(**kwargs) + + return wrapped + + +def _prepare_transition_inputs( + prev_distribution: ConditionalDistribution, + transition_info: TransitionInfo, + factors: tuple[str, ...], + n_obs: int, +) -> tuple[dict[str, Array], int]: + """Pack the period-0 conditional distribution payload for the likelihood. + + Returns a dict the transition likelihood reads to seed its on-demand + chain rebuild from a joint Halton draw. The chain is rebuilt fresh at + every likelihood call from the period-0 cond_means/cond_chols plus + the carried `chain_links` (handled separately); no static + chained-sample carry-over is consumed here. + + Return: + Tuple of (prev_dist_arrays dict, n_transition_params). The dict + contains keys "cond_weights" (per-obs Bayes-posterior mixture + weights), "cond_means" (per-component, per-obs Schur-conditional + means at period 0), and "cond_chols" (per-component + Schur-conditional Cholesky factors at period 0). + + """ + n_components = len(prev_distribution.components) + + if prev_distribution.conditional_weights is not None: + cond_weights = prev_distribution.conditional_weights + else: + cond_weights = jnp.broadcast_to( + prev_distribution.mixture_weights[None, :], + (n_obs, n_components), + ) + + if prev_distribution.cond_means is None or prev_distribution.cond_chols is None: + msg = ( + "prev_distribution must carry cond_means and cond_chols (the " + "period-0 Schur-conditional payload). Initial period must be " + "estimated before any transition step." + ) + raise ValueError(msg) + + prev_dist_arrays = { + "cond_weights": cond_weights, + "cond_means": prev_distribution.cond_means, + "cond_chols": prev_distribution.cond_chols, + } + + total_n_transition_params = sum( + len(transition_info.param_names[f]) + for f in factors + if f in transition_info.param_names + ) + + return prev_dist_arrays, total_n_transition_params + + +def _seed_probability_start_values( + params_template: pd.DataFrame, + transition_constraints: list[om.constraints.Constraint], + fixed_params: pd.DataFrame | None, +) -> None: + """Seed start values for probability-constrained selectors. + + Distribute ``1 - sum(fixed_values)`` uniformly over the unfixed entries + so the simplex sums to one before optimization. 
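+
+    For example, with one fixed entry of 0.3 and two unfixed entries, each
+    unfixed entry is seeded at (1 - 0.3) / 2 = 0.35.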
+ """ + fixed_loc = set(fixed_params.index) if fixed_params is not None else set() + for constr in transition_constraints: + if not isinstance(constr, om.ProbabilityConstraint): + continue + prob_idx = constr.selector(params_template[["value"]]).index + fixed_mask = prob_idx.isin(fixed_loc) + fixed_sum = ( + float(params_template.loc[prob_idx[fixed_mask], "value"].sum()) + if fixed_mask.any() + else 0.0 + ) + free_prob_idx = prob_idx[~fixed_mask] + if len(free_prob_idx) > 0: + params_template.loc[free_prob_idx, "value"] = (1.0 - fixed_sum) / len( + free_prob_idx + ) + + +def _initialize_transition_params( + params_template: pd.DataFrame, + measurements: Array, + start_params: pd.DataFrame | None = None, + fixed_params: pd.DataFrame | None = None, + *, + period: int | None = None, + model_spec: ModelSpec | None = None, + state_factors: tuple[str, ...] = (), + endogenous_factors: tuple[str, ...] = (), + observed_factors: tuple[str, ...] = (), + observed_factor_data: Array | None = None, + prev_measurements: Array | None = None, + af_options: AFEstimationOptions | None = None, + normalizations: dict[str, dict[tuple[str, str], float]] | None = None, +) -> pd.DataFrame: + """Initialize transition period parameters with reasonable defaults. + + If `start_params` is provided, matching entries override the defaults. + If `fixed_params` is provided, matching entries are pinned (value + + bounds clamped). + + When ``af_options.initialization_strategy == "spearman"``, run + Spearman cross-covariance estimation per factor at the current period + and seed loadings, sigma_meas, sigma_shock, sigma_inv, and inv-equation β from + those moments. Falls back to the static defaults below for any factor + with fewer than two measurements or where Spearman identification is + degenerate. + """ + params = params_template.copy() + meas_np = np.array(measurements) + + # Transition params: small values (near identity) + trans_mask = params.index.get_level_values("category") == "transition" + for idx in params.index[trans_mask]: + if params.loc[idx, "lower_bound"] != params.loc[idx, "upper_bound"]: + # Set linear terms close to identity + params.loc[idx, "value"] = 0.5 + + # Shock SDs: moderate + shock_mask = params.index.get_level_values("category") == "shock_sds" + params.loc[shock_mask, "value"] = 0.5 + + # Measurement SDs from data + sd_mask = params.index.get_level_values("category") == "meas_sds" + for i, idx in enumerate(params.index[sd_mask]): + if i < meas_np.shape[1]: + obs_sd = float(np.nanstd(meas_np[:, i])) + params.loc[idx, "value"] = max(obs_sd * 0.5, 0.01) + + # Loadings to 1.0 where free + load_mask = params.index.get_level_values("category") == "loadings" + for idx in params.index[load_mask]: + if params.loc[idx, "lower_bound"] != params.loc[idx, "upper_bound"]: + params.loc[idx, "value"] = 1.0 + + # Optional moment-based override: seed loadings / sigma_meas / sigma_shock / + # sigma_inv from Spearman cross-covariances of the current-period + # measurements. This puts the optimizer near the strongly-identified + # MLE neighborhood; for sigma_inv_0 specifically, this is the difference + # between converging at truth and drifting to the lower bound along + # the sigma_inv / sigma_meas constant-Var ridge. 
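+    # (Illustrative intuition, assuming the anchor loading is normalized to 1:
+    # the data pin Var(Z_anchor) = Var(latent) + sigma_meas**2, so sigma_inv
+    # and sigma_meas can trade off while holding that total fixed, which is
+    # the "constant-Var ridge" referenced above.)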
+ if ( + af_options is not None + and af_options.initialization_strategy == "spearman" + and model_spec is not None + and period is not None + ): + params = _apply_moment_based_overrides_transition( + params, + measurements, + prev_measurements=prev_measurements, + observed_factor_data=observed_factor_data, + model_spec=model_spec, + period=period, + state_factors=state_factors, + endogenous_factors=endogenous_factors, + observed_factors=observed_factors, + normalizations=normalizations or {}, + ) + + if start_params is not None: + apply_start_params(params, start_params) + + if fixed_params is not None: + apply_fixed_params(params, fixed_params) + + return params + + +def _apply_moment_based_overrides_transition( # noqa: C901, PLR0912, PLR0915 + params: pd.DataFrame, + measurements: Array, + *, + prev_measurements: Array | None, + observed_factor_data: Array | None, + model_spec: ModelSpec, + period: int, + state_factors: tuple[str, ...], + endogenous_factors: tuple[str, ...], + observed_factors: tuple[str, ...], + normalizations: dict[str, dict[tuple[str, str], float]], +) -> pd.DataFrame: + """Override transition-period params with Spearman cross-cov moments. + + For each factor with at least two measurements at the current period, + run `spearman_factor_moments` and write back loadings, sigma_meas, and + derive a starting sigma_shock (state factors) or sigma_inv (endogenous factors) + from the latent variance. Investment-equation β coefficients are seeded + via OLS of the endogenous-factor anchor measurement on the prev-period + state anchor measurements plus the observed factors. + """ + out = params.copy() + meas_np = np.array(measurements) + measurements_pt = get_measurements_per_factor(model_spec.factors, period=period) + all_measures = _get_ordered_measures(measurements_pt) + meas_index = {m: i for i, m in enumerate(all_measures)} + loading_norms = normalizations.get("loadings", {}) + + spearman_results: dict[str, SpearmanResult] = {} + + for factor, factor_meas in measurements_pt.items(): + if len(factor_meas) < 2: + continue + cols = [meas_index[m] for m in factor_meas if m in meas_index] + if len(cols) < 2: + continue + if max(cols) >= meas_np.shape[1]: + continue + sub = meas_np[:, cols] + + anchor_loading = 1.0 + anchor_local = 0 + for local_idx, meas_name in enumerate(factor_meas): + if (meas_name, factor) in loading_norms: + anchor_local = local_idx + anchor_loading = float(loading_norms[(meas_name, factor)]) + break + + result = spearman_factor_moments( + sub, + anchor_idx=anchor_local, + anchor_loading=anchor_loading, + ) + if not result.valid: + continue + spearman_results[factor] = result + + # Override loadings (skip pinned rows). + for local_idx, meas_name in enumerate(factor_meas): + loc = ("loadings", period, meas_name, factor) + if loc not in out.index: + continue + if out.loc[loc, "lower_bound"] != out.loc[loc, "upper_bound"]: + out.loc[loc, "value"] = float(result.loadings[local_idx]) + + # Override measurement SDs (skip pinned rows). + for local_idx, meas_name in enumerate(factor_meas): + loc = ("meas_sds", period, meas_name, "-") + if loc not in out.index: + continue + if out.loc[loc, "lower_bound"] != out.loc[loc, "upper_bound"]: + out.loc[loc, "value"] = float(result.meas_sds[local_idx]) + + # Seed shock_sds (state factors) and investment_sds (endogenous + # factors), and the investment equation's β coefficients, via OLS of + # the current-period anchor measurement on the prev-period state + # anchors plus observed factors. 
The OLS residual variance gives + # sigma_shock² + sigma_meas² (state) or sigma_inv² + sigma_meas² (endogenous); + # subtracting sigma_meas² gives a clean starting point for the latent + # shock SD that correctly accounts for variance explained by + # observed factors and the prev state. (Without this subtraction + # the seed is dominated by observed-factor variance, which can make + # sigma_inv start orders of magnitude above truth.) + if prev_measurements is not None and len(state_factors) > 0: + prev_meas_np = np.array(prev_measurements) + prev_measurements_pt = get_measurements_per_factor( + model_spec.factors, period=period - 1 + ) + prev_all_measures = _get_ordered_measures(prev_measurements_pt) + prev_meas_index = {m: i for i, m in enumerate(prev_all_measures)} + + state_anchor_cols: list[int] = [] + for sf in state_factors: + sf_meas = prev_measurements_pt.get(sf, ()) + if not sf_meas or sf_meas[0] not in prev_meas_index: + state_anchor_cols.append(-1) + continue + state_anchor_cols.append(prev_meas_index[sf_meas[0]]) + + obs_data = ( + np.array(observed_factor_data) + if observed_factor_data is not None and len(observed_factors) > 0 + else np.zeros((prev_meas_np.shape[0], 0)) + ) + + anchors_ok = all(c >= 0 for c in state_anchor_cols) + + # Seed sigma_shock for each state factor: residual variance of + # OLS(Z_state_anchor_t ~ Z_state_anchor_{t-1}, observed) minus + # sigma_meas². + if anchors_ok: + state_anchor_data = prev_meas_np[:, state_anchor_cols] + regressors_state = np.column_stack([state_anchor_data, obs_data]) + for sf in state_factors: + if sf not in spearman_results: + continue + sf_meas = measurements_pt.get(sf, ()) + if not sf_meas: + continue + anchor_idx = meas_index.get(sf_meas[0]) + if anchor_idx is None: + continue + response = meas_np[:, anchor_idx] + if response.shape[0] != regressors_state.shape[0]: + continue + beta_hat = seed_beta_from_ols(response, regressors_state) + if not np.all(np.isfinite(beta_hat)): + continue + fitted = regressors_state @ beta_hat + resid = response - fitted + resid_finite = resid[np.isfinite(resid)] + if resid_finite.size < 2: + continue + resid_var = float(np.var(resid_finite, ddof=1)) + sigma_meas_anchor = float(spearman_results[sf].meas_sds[0]) + seed_sd = float(np.sqrt(max(resid_var - sigma_meas_anchor**2, 1e-6))) + loc = ("shock_sds", period - 1, sf, "-") + if ( + loc in out.index + and out.loc[loc, "lower_bound"] != out.loc[loc, "upper_bound"] + ): + out.loc[loc, "value"] = seed_sd + + # Seed sigma_inv and inv-equation β for each endogenous factor. β goes + # from OLS coefs (the same regression used for the sigma_inv residual). 
+ if anchors_ok and len(endogenous_factors) > 0: + state_anchor_data = prev_meas_np[:, state_anchor_cols] + regressors_inv = np.column_stack([state_anchor_data, obs_data]) + for ef in endogenous_factors: + if ef not in spearman_results: + continue + ef_meas = measurements_pt.get(ef, ()) + if not ef_meas: + continue + ef_anchor_idx = meas_index.get(ef_meas[0]) + if ef_anchor_idx is None: + continue + response = meas_np[:, ef_anchor_idx] + if response.shape[0] != regressors_inv.shape[0]: + continue + beta_hat = seed_beta_from_ols(response, regressors_inv) + if not np.all(np.isfinite(beta_hat)): + continue + fitted = regressors_inv @ beta_hat + resid = response - fitted + resid_finite = resid[np.isfinite(resid)] + if resid_finite.size < 2: + continue + resid_var = float(np.var(resid_finite, ddof=1)) + sigma_meas_anchor = float(spearman_results[ef].meas_sds[0]) + seed_sd = float(np.sqrt(max(resid_var - sigma_meas_anchor**2, 1e-6))) + loc = ("investment_sds", period - 1, ef, "-") + if ( + loc in out.index + and out.loc[loc, "lower_bound"] != out.loc[loc, "upper_bound"] + ): + out.loc[loc, "value"] = seed_sd + + # Write β into inv_eq rows. + state_betas = beta_hat[: len(state_factors)] + obs_betas = beta_hat[len(state_factors) :] + for sf, b in zip(state_factors, state_betas, strict=True): + loc = ("investment_eq", period - 1, ef, sf) + if ( + loc in out.index + and out.loc[loc, "lower_bound"] != out.loc[loc, "upper_bound"] + ): + out.loc[loc, "value"] = float(b) + for of, b in zip(observed_factors, obs_betas, strict=True): + loc = ("investment_eq", period - 1, ef, of) + if ( + loc in out.index + and out.loc[loc, "lower_bound"] != out.loc[loc, "upper_bound"] + ): + out.loc[loc, "value"] = float(b) + + return out + + +def _replace_chain_links( + cond_dist: ConditionalDistribution, + chain_links: tuple[ChainLink, ...], +) -> ConditionalDistribution: + """Return a new ConditionalDistribution with `chain_links` replaced. + + Used by `estimate_transition_period` to carry the accumulated chain + history forward (one extra `ChainLink` per estimated transition). + """ + return ConditionalDistribution( + mixture_weights=cond_dist.mixture_weights, + components=cond_dist.components, + samples_per_component=cond_dist.samples_per_component, + conditional_weights=cond_dist.conditional_weights, + cond_means=cond_dist.cond_means, + cond_chols=cond_dist.cond_chols, + chain_links=chain_links, + ) + + +def _build_chain_link( + *, + period: int, + result_params: pd.DataFrame, + combined_transition: Callable, + shock_factor_indices: Array, + n_inv_eq_params_per: int, + obs_factor_values: Array, +) -> ChainLink: + """Pack a freshly-fitted period's parameters into a ChainLink. + + The resulting `ChainLink` is appended to the carried `chain_links` so + that downstream transition periods can replay this period inside their + joint-Halton chain rebuild (see `_rebuild_chain_at_period`). 
+ """ + transition_mask = result_params.index.get_level_values("category") == "transition" + transition_params = jnp.array( + result_params.loc[transition_mask, "value"].to_numpy() + ) + + shock_mask = result_params.index.get_level_values("category") == "shock_sds" + shock_sds = jnp.array(result_params.loc[shock_mask, "value"].to_numpy()) + + inv_eq_mask = result_params.index.get_level_values("category") == "investment_eq" + inv_eq_params = jnp.array(result_params.loc[inv_eq_mask, "value"].to_numpy()) + + inv_sd_mask = result_params.index.get_level_values("category") == "investment_sds" + inv_sds = jnp.array(result_params.loc[inv_sd_mask, "value"].to_numpy()) + + return ChainLink( + period=period, + transition_func=combined_transition, + transition_params=transition_params, + shock_sds=shock_sds, + shock_factor_indices=shock_factor_indices, + inv_eq_params=inv_eq_params, + inv_sds=inv_sds, + n_inv_eq_params_per=n_inv_eq_params_per, + obs_factor_values=obs_factor_values, + ) + + +def _update_conditional_distribution( + prev_distribution: ConditionalDistribution, + result_params: pd.DataFrame, + combined_transition: Callable, + joint_nodes: Array, + n_state: int, + n_endog: int, + n_shock: int, + shock_factor_indices: Array, + observed_factor_values: Array, + n_observed_factors: int, +) -> ConditionalDistribution: + """Build the next-period importance sample by chaining forward. + + For each mixture component l, each Halton index j, and each observation + i: + + 1. ``theta_prev = prev_samples[l][j, i, :]`` (no fresh draw). + 2. ``inv = beta_0 + beta_state @ theta_prev + beta_obs @ Y_i + + sigma_inv * z_inv[j]`` (current-period investment equation, + evaluated at the just-estimated parameters and the same z_inv that + the period-t likelihood used). + 3. ``theta_t = transition(full_prev_with_obs, trans_params) + + sigma_prod * z_prod[j]``. + + The result is a per-component array of shape + ``(n_halton, n_obs, n_state)`` which we hand to the next period's + likelihood. Per-component summary stats (mean, chol_cov) are computed + from each new sample for use by `posterior_states` and `inference`. + + This mirrors MATLAB's `create_nodes_weights_12` style: the previous + period's Halton-driven samples are propagated through the just-fitted + chain, and that chained sample becomes the next period's importance + distribution. + """ + # Extract estimated transition params, shock SDs, investment-equation + # params, and investment-shock SDs. + trans_mask = result_params.index.get_level_values("category") == "transition" + shock_mask = result_params.index.get_level_values("category") == "shock_sds" + inv_eq_mask = result_params.index.get_level_values("category") == "investment_eq" + inv_sd_mask = result_params.index.get_level_values("category") == "investment_sds" + + trans_params = jnp.array(result_params.loc[trans_mask, "value"].to_numpy()) + shock_sds = jnp.array(result_params.loc[shock_mask, "value"].to_numpy()) + inv_eq_params = ( + jnp.array(result_params.loc[inv_eq_mask, "value"].to_numpy()) + if inv_eq_mask.any() + else jnp.zeros(0) + ) + inv_sds = ( + jnp.array(result_params.loc[inv_sd_mask, "value"].to_numpy()) + if inv_sd_mask.any() + else jnp.zeros(0) + ) + + n_per_inv_eq = 1 + n_state + n_observed_factors if n_endog > 0 else 0 + + n_halton = joint_nodes.shape[0] + # The joint Halton design now has a larger dimension than just the + # current step's shocks (it also covers the chain rebuild's z_state + # and prior-step shocks; see `estimate_transition_period`). 
The + # current-step shocks live in the LAST `n_shock + n_endog` columns. + z_block_curr = n_shock + n_endog + + def _chain_one_component(prev_sample: Array) -> Array: + """Map (j, i) -> theta_t given prev_sample (n_halton, n_obs, n_state).""" + + def _at_node(j_idx: int, i_idx: int) -> Array: + theta_prev = prev_sample[j_idx, i_idx] + obs_y = ( + observed_factor_values[i_idx] + if n_observed_factors > 0 + else jnp.zeros(0) + ) + z_at_j_full = joint_nodes[j_idx] + z_at_j = z_at_j_full[-z_block_curr:] + z_shock = z_at_j[:n_shock] + z_inv_shock = z_at_j[n_shock:] + + # Investment equation at the just-estimated params. + inv = jnp.zeros(n_endog) + for k in range(n_endog): + beta = inv_eq_params[k * n_per_inv_eq : (k + 1) * n_per_inv_eq] + intercept = beta[0] + state_coeffs = beta[1 : 1 + n_state] + obs_coeffs = beta[1 + n_state :] + inv_k = ( + intercept + + jnp.dot(state_coeffs, theta_prev) + + jnp.dot(obs_coeffs, obs_y) + + inv_sds[k] * z_inv_shock[k] + ) + inv = inv.at[k].set(inv_k) # noqa: PD008 + + full_prev_with_obs = jnp.concatenate([theta_prev, inv, obs_y]) + state_shock_contrib = ( + jnp.zeros(n_state) # noqa: PD008 + .at[shock_factor_indices] + .set(shock_sds * z_shock) + ) + return combined_transition(full_prev_with_obs, trans_params) + ( + state_shock_contrib + ) + + n_obs = prev_sample.shape[1] + return jax.vmap( + jax.vmap(_at_node, in_axes=(None, 0)), + in_axes=(0, None), + )(jnp.arange(n_halton), jnp.arange(n_obs)) + + new_samples_per_component: list[Array] = [] + new_components: list[MixtureComponent] = [] + for prev_sample in prev_distribution.samples_per_component: + new_sample = _chain_one_component(prev_sample) + new_samples_per_component.append(new_sample) + # Summary stats: per-Halton mean across obs for posterior_states + # consumption. (Mean is also taken across obs to give a population- + # level summary; the actual likelihood uses the per-obs sample.) + flat = new_sample.reshape(-1, n_state) + new_mean = jnp.mean(flat, axis=0) + centered = flat - new_mean[None, :] + new_cov = (centered.T @ centered) / flat.shape[0] + 1e-8 * jnp.eye(n_state) + new_chol = jnp.linalg.cholesky(new_cov) + new_components.append(MixtureComponent(mean=new_mean, chol_cov=new_chol)) + + return ConditionalDistribution( + mixture_weights=prev_distribution.mixture_weights, + components=tuple(new_components), + samples_per_component=tuple(new_samples_per_component), + conditional_weights=prev_distribution.conditional_weights, + # Carry the period-0 Schur conditional payload AND the chain + # history forward; downstream transition steps replay the chain + # from period 0, not from this period's chained samples. 
+ cond_means=prev_distribution.cond_means, + cond_chols=prev_distribution.cond_chols, + chain_links=prev_distribution.chain_links, + ) diff --git a/src/skillmodels/af/types.py b/src/skillmodels/af/types.py new file mode 100644 index 00000000..0ce8e2cd --- /dev/null +++ b/src/skillmodels/af/types.py @@ -0,0 +1,275 @@ +"""Frozen dataclass definitions for the AF estimator.""" + +from collections.abc import Callable, Mapping +from dataclasses import dataclass, field +from types import MappingProxyType +from typing import TYPE_CHECKING, Any, Literal + +import jax +import pandas as pd +from jax import Array + +from skillmodels.common.types import ensure_containers_are_immutable + +if TYPE_CHECKING: + from skillmodels.common.model_spec import ModelSpec + + +@dataclass(frozen=True, init=False) +class AFEstimationOptions: + """Configuration options for the AF estimator.""" + + n_halton_points: int + """Halton quadrature nodes per dimension.""" + + n_halton_points_shock: int + """Quadrature nodes for production shock integration.""" + + n_mixture_components: int + """Gaussian mixture components for initial distribution.""" + + optimizer_algorithm: str + """Optimization algorithm for each period's MLE.""" + + optimizer_options: MappingProxyType[str, Any] + """Additional options passed to optimagic.""" + + two_stage: bool + """Whether to use coarse-then-fine grid strategy.""" + + coarse_fraction: float + """Fraction of quadrature points for coarse stage (if two_stage is True).""" + + stability_floor: float + """Floor added to likelihood for numerical stability.""" + + n_obs_per_batch: int | None + """Observations per reverse-mode autodiff chunk. + + When `None` (default), an auto-detected value is derived from the + available GPU/CPU memory in `estimate_af`. Setting this to a small + integer trades compile time and throughput for lower peak VRAM; the + likelihood value is unchanged. + """ + + initialization_strategy: Literal["constant", "spearman", "amn"] + """Strategy for seeding optimizer start values. + + `"amn"` (default) runs the full AMN 2020 three-stage estimator + upfront and uses its parameter estimates as start values for the + per-period MLE. `"spearman"` uses Spearman cross-covariance + moments per period (factor-analysis identification) to seed + loadings, sigma_meas, sigma_shock, and sigma_inv. `"constant"` + reproduces the legacy 0.5 / 0.5*obs_sd defaults; provided for + regression testing and pre-fix reproducibility. 
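+
+    Example: ``AFEstimationOptions(initialization_strategy="spearman")``
+    switches the per-period seeding to the Spearman moments described above.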
+ """ + + def __init__( # noqa: D107 + self, + n_halton_points: int = 50, + n_halton_points_shock: int = 30, + n_mixture_components: int = 2, + optimizer_algorithm: str = "fides", + optimizer_options: Mapping[str, Any] | None = None, + *, + two_stage: bool = False, + coarse_fraction: float = 0.5, + stability_floor: float = 1e-217, + n_obs_per_batch: int | None = None, + initialization_strategy: Literal["constant", "spearman", "amn"] = "amn", + ) -> None: + object.__setattr__(self, "n_halton_points", n_halton_points) + object.__setattr__(self, "n_halton_points_shock", n_halton_points_shock) + object.__setattr__(self, "n_mixture_components", n_mixture_components) + object.__setattr__(self, "optimizer_algorithm", optimizer_algorithm) + object.__setattr__( + self, + "optimizer_options", + ensure_containers_are_immutable(optimizer_options or {}), + ) + object.__setattr__(self, "two_stage", two_stage) + object.__setattr__(self, "coarse_fraction", coarse_fraction) + object.__setattr__(self, "stability_floor", stability_floor) + object.__setattr__(self, "n_obs_per_batch", n_obs_per_batch) + object.__setattr__(self, "initialization_strategy", initialization_strategy) + + +@dataclass(frozen=True) +class MixtureComponent: + """Single component of a Gaussian mixture distribution.""" + + mean: Array + """Mean vector, shape (n_factors,).""" + + chol_cov: Array + """Lower-triangular Cholesky factor of covariance, shape (n_factors, n_factors).""" + + +@dataclass(frozen=True) +class ChainLink: + """Frozen-period parameters for one prior step in the θ_0→θ_{t-1} chain. + + Used by the AF transition likelihood to rebuild the chained importance + sample on-demand from a single joint Halton design at every transition + step (mirroring MATLAB's ``create_nodes_weights_01/12``). Each + `ChainLink` carries the just-fitted parameters of one prior transition + so the chain can be replayed inside the next step's likelihood call. + """ + + period: int + """Calendar period at which this link applies (1-indexed; the link + transforms θ_{period-1} → θ_period).""" + + transition_func: Callable + """Combined per-factor transition function f(full_states, params).""" + + transition_params: Array + """Flat transition parameter vector for this period, shape + ``(total_n_transition_params,)``.""" + + shock_sds: Array + """Production shock SDs for shock-bearing state factors, shape + ``(n_shock_factors,)``.""" + + shock_factor_indices: Array + """Mapping each shock slot to its position in the state-factor + ordering, shape ``(n_shock_factors,)`` int.""" + + inv_eq_params: Array + """Flat investment-equation parameters, shape + ``(n_endogenous * n_inv_eq_params_per,)``.""" + + inv_sds: Array + """Investment shock SDs, shape ``(n_endogenous,)``.""" + + n_inv_eq_params_per: int + """Investment equation parameters per endogenous factor (1 + n_state + + n_observed_factors when n_endogenous > 0; 0 otherwise).""" + + obs_factor_values: Array + """Observed factor values at this link's source period (i.e. period - + 1), shape ``(n_obs, n_observed_factors)``. Used in the chain rebuild + for the inv equation and the transition function.""" + + +# Register ChainLink as a JAX pytree so tuples of ChainLinks can be passed +# through `jax.jit` in the AF transition likelihood. Array fields are +# leaves; the period index, transition function, and per-link int counts +# are static metadata baked into the trace. 
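+# A minimal usage sketch (illustrative only; `loglike` is a hypothetical
+# function, not part of this module): after this registration a tuple of
+# ChainLinks can be passed straight through `jax.jit`,
+#
+#     @jax.jit
+#     def loglike(params, chain_links: tuple[ChainLink, ...]): ...
+#
+# and only a change in one of the static meta fields forces a retrace.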
+jax.tree_util.register_dataclass( + ChainLink, + data_fields=[ + "transition_params", + "shock_sds", + "shock_factor_indices", + "inv_eq_params", + "inv_sds", + "obs_factor_values", + ], + meta_fields=["period", "transition_func", "n_inv_eq_params_per"], +) + + +@dataclass(frozen=True) +class ConditionalDistribution: + """Estimated conditional distribution of latent factors at a given period. + + Holds two things that downstream code consumes: + + * Per-component summary statistics (`mean`, `chol_cov`) of the chained + sample at this period — used by `posterior_states.py` and the + inference sandwich code. + * The chain history (`chain_links`) needed to rebuild the chained + sample on-demand inside the next transition step's likelihood (joint + Halton design — see `_rebuild_chain_at_period` in + `af.likelihood`). + + For the period-0 distribution: per-obs `cond_means` / `cond_chols` + encode the Schur conditional of latent factors given observed factors + (`Y_0`); `conditional_weights` are the Bayes posterior mixture weights + given `Y_0`. For later periods these are unused (chain replays from + period 0). + + Note: `samples_per_component` is retained for backward compatibility + and posterior-state-summary computation, but is no longer load-bearing + inside the transition likelihood (which rebuilds the chain on-demand). + """ + + mixture_weights: Array + """Mixture weights, shape (n_components,).""" + + components: tuple[MixtureComponent, ...] + """Per-component summary statistics (mean, chol_cov) derived from the + importance sample. Used by `posterior_states` and `inference`; not used + in the transition likelihood itself.""" + + samples_per_component: tuple[Array, ...] + """One importance-sample array per mixture component, each shape + ``(n_halton, n_obs, n_state)``. Retained for posterior-state summary + statistics; not consumed by the transition likelihood (which rebuilds + the chain on-demand from a joint Halton). May use a smaller Halton + count than the likelihood's `n_halton_points`.""" + + conditional_weights: Array | None = None + """Individual-specific conditional mixture weights, shape (n_obs, n_components). + + When not None, these override `mixture_weights` for each observation (computed + from Bayes' rule using data from previous periods). + """ + + cond_means: Array | None = None + """Per-obs Schur-conditional means of the latent state given observed + factors at period 0, shape ``(n_components, n_obs, n_state)``. Built + by the initial period only. None for transition-period distributions. + """ + + cond_chols: Array | None = None + """Per-component Schur-conditional Cholesky factors at period 0, shape + ``(n_components, n_state, n_state)``. Shared across observations + because the conditional covariance does not depend on Y_i (it's the + prior cov_yy minus a Schur term). None for transition-period + distributions.""" + + chain_links: tuple[ChainLink, ...] = field(default_factory=tuple) + """Sequence of frozen prior-period parameter packages, one per + transition already estimated. Empty before period 1; one entry after + period 1 estimation; two entries after period 2; etc. 
Used by the + transition likelihood to rebuild the chained sample on-demand from a + single joint Halton.""" + + +@dataclass(frozen=True) +class AFPeriodResult: + """Result from estimating a single period.""" + + period: int + """Calendar period index.""" + + params: pd.DataFrame + """Estimated parameters with 4-level MultiIndex (category, period, name1, name2).""" + + loglikelihood: float + """Log-likelihood value at the optimum.""" + + success: bool + """Whether optimization converged.""" + + optimize_result: Any + """Raw optimagic result object.""" + + +@dataclass(frozen=True) +class AFEstimationResult: + """Complete result from AF estimation across all periods.""" + + period_results: tuple[AFPeriodResult, ...] + """Per-period estimation results, ordered by period.""" + + all_params: pd.DataFrame + """Combined parameters from all periods with standard 4-level MultiIndex.""" + + model_spec: ModelSpec + """The ModelSpec used for estimation.""" + + conditional_distributions: tuple[ConditionalDistribution, ...] + """Estimated conditional distributions per period (for filtered states).""" diff --git a/src/skillmodels/af/validate.py b/src/skillmodels/af/validate.py new file mode 100644 index 00000000..48bae8e0 --- /dev/null +++ b/src/skillmodels/af/validate.py @@ -0,0 +1,124 @@ +"""AF-specific ModelSpec validation.""" + +import warnings + +from skillmodels.common.model_spec import FactorSpec, ModelSpec + +# Transition functions compatible with AF estimation (parametric, differentiable). +_AF_COMPATIBLE_TRANSITIONS = frozenset( + { + "linear", + "translog", + "robust_translog", + "log_ces", + "log_ces_with_constant", + "log_ces_general", + "linear_and_squares", + } +) + +# Hard minimum: 2 measurements + a loading normalization just-identify the +# per-period measurement system (3 moments — Var(Z1), Var(Z2), Cov(Z1,Z2) — +# vs 1 free loading + 2 sigma_meas) given Var(F) pinned by the chain. +_MIN_MEASURES_PER_FACTOR = 2 +# Recommended minimum: the AF paper's identification arguments assume 3 +# indicators per factor per period (over-identified Spearman moments). +# Below this, Stage-B Spearman is noisy and cross-period equality +# constraints on loadings / sigma_meas become load-bearing for ID. +_RECOMMENDED_MEASURES_PER_FACTOR = 3 + + +def validate_af_model(model_spec: ModelSpec) -> None: + """Validate that a ModelSpec is compatible with AF estimation. + + Check: + - At least 3 measurements per factor in each period where the factor is measured + - Transition functions are parametric (built-in or registered) + - Normalizations are present for each factor + + Raise: + ValueError: If validation fails, with a detailed error message. + + """ + errors: list[str] = [] + for factor_name, factor_spec in model_spec.factors.items(): + errors.extend(_validate_factor(factor_name, factor_spec)) + + if errors: + msg = "ModelSpec is not compatible with AF estimation:\n" + "\n".join( + f" - {e}" for e in errors + ) + raise ValueError(msg) + + +def _validate_factor(factor_name: str, factor_spec: FactorSpec) -> list[str]: + """Return a list of error messages for a single factor.""" + errors: list[str] = [] + + # Check measurements: need >= 2 per factor in each active period; warn + # below 3 (the recommended count from the AF paper). 
+ for period, measures in enumerate(factor_spec.measurements): + if len(measures) == 0: + continue + if len(measures) < _MIN_MEASURES_PER_FACTOR: + errors.append( + f"Factor '{factor_name}' period {period}: AF requires at least " + f"{_MIN_MEASURES_PER_FACTOR} measurements, got {len(measures)}." + ) + elif len(measures) < _RECOMMENDED_MEASURES_PER_FACTOR: + warnings.warn( + f"Factor '{factor_name}' period {period}: only {len(measures)} " + f"measurements (AF paper assumes at least " + f"{_RECOMMENDED_MEASURES_PER_FACTOR}). Identification of " + f"loadings + sigma_meas at this period relies on " + f"cross-period equality constraints across the AF MLE chain; " + f"supply explicit `fixed_params` for the loading if needed.", + stacklevel=3, + ) + + # Check transition function is parametric + tf = factor_spec.transition_function + if tf is not None and isinstance(tf, str) and tf not in _AF_COMPATIBLE_TRANSITIONS: + errors.append( + f"Factor '{factor_name}': transition function '{tf}' is not in the " + f"set of AF-compatible functions: {sorted(_AF_COMPATIBLE_TRANSITIONS)}." + ) + # Custom callables are accepted if they have __registered_params__ + if callable(tf) and not hasattr(tf, "__registered_params__"): + errors.append( + f"Factor '{factor_name}': custom transition function must be decorated " + f"with @register_params to be used with AF estimation." + ) + + # Check normalizations exist + if factor_spec.normalizations is None: + errors.append( + f"Factor '{factor_name}': AF requires explicit normalizations " + f"(loading=1, intercept=0 for at least one measurement per period)." + ) + + # has_initial_distribution=False requires is_endogenous=True so the + # factor can be reconstructed via the investment equation at period 0. + if not factor_spec.has_initial_distribution and not factor_spec.is_endogenous: + errors.append( + f"Factor '{factor_name}': has_initial_distribution=False is only " + f"supported for endogenous factors (set is_endogenous=True)." + ) + + # Factors without an initial distribution must also not be measured at + # period 0: their value at period 0 is not drawn from any mixture, so a + # measurement density there would have no latent value to hit. + if ( + not factor_spec.has_initial_distribution + and len(factor_spec.measurements) > 0 + and len(factor_spec.measurements[0]) > 0 + ): + errors.append( + f"Factor '{factor_name}': has_initial_distribution=False requires " + f"empty measurements at period 0 (got " + f"{factor_spec.measurements[0]!r}). Drop them from the FactorSpec; " + f"their contribution would typically be absorbed into the " + f"transition step 0->1 in a MATLAB-style reproduction." + ) + + return errors diff --git a/src/skillmodels/amn/__init__.py b/src/skillmodels/amn/__init__.py new file mode 100644 index 00000000..cfe07022 --- /dev/null +++ b/src/skillmodels/amn/__init__.py @@ -0,0 +1,79 @@ +"""AMN: Attanasio-Meghir-Nix (2020) latent factor estimator (and start values). + +This package exposes two distinct surfaces: + +1. **Start-value helpers** -- the Spearman cross-covariance moments + (`spearman_factor_moments`) and Bartlett-score OLS + (`seed_beta_from_ols`) that seed every estimator's starting values + (`get_spearman_start_params`, used by CHS and AF). + +2. **Full AMN estimator** -- a three-stage mixture-EM / + minimum-distance / simulate-and-regress procedure mirroring AMN 2020, + plus bootstrap inference and a per-observation posterior-state helper + for diagnostic plots. 
+ +Public API: + +* Start-value helpers: `spearman_factor_moments`, `derive_unexplained_sd`, + `seed_beta_from_ols`, `SpearmanResult`, `get_spearman_start_params`, + `pool_equality_groups`. +* AMN estimator: `estimate_amn`, `compute_amn_standard_errors`, + `get_amn_posterior_states`, `AMNEstimationOptions`, + `AMNEstimationResult`, `AMNInferenceResult`, `AMNStageResults`. +* Stage 1 building blocks (for testing / advanced use): + `fit_mixture_em`, `build_augmented_measure_layout`, + `build_augmented_measure_matrix`, `MixtureFitResult`, + `AugmentedMeasureLayout`. +""" + +from skillmodels.amn.estimate import estimate_amn +from skillmodels.amn.inference import compute_amn_standard_errors +from skillmodels.amn.mixture_em import ( + build_augmented_measure_layout, + build_augmented_measure_matrix, + fit_mixture_em, +) +from skillmodels.amn.moments import ( + SpearmanResult, + derive_unexplained_sd, + seed_beta_from_ols, + spearman_factor_moments, +) +from skillmodels.amn.posterior_states import get_amn_posterior_states +from skillmodels.amn.start_values import ( + get_spearman_start_params, + pool_equality_groups, +) +from skillmodels.amn.types import ( + AMNEstimationOptions, + AMNEstimationResult, + AMNInferenceResult, + AMNStageResults, + AugmentedMeasureLayout, + MinimumDistanceResult, + MixtureFitResult, + ProductionFitResult, +) + +__all__ = [ + "AMNEstimationOptions", + "AMNEstimationResult", + "AMNInferenceResult", + "AMNStageResults", + "AugmentedMeasureLayout", + "MinimumDistanceResult", + "MixtureFitResult", + "ProductionFitResult", + "SpearmanResult", + "build_augmented_measure_layout", + "build_augmented_measure_matrix", + "compute_amn_standard_errors", + "derive_unexplained_sd", + "estimate_amn", + "fit_mixture_em", + "get_amn_posterior_states", + "get_spearman_start_params", + "pool_equality_groups", + "seed_beta_from_ols", + "spearman_factor_moments", +] diff --git a/src/skillmodels/amn/estimate.py b/src/skillmodels/amn/estimate.py new file mode 100644 index 00000000..2f3b487d --- /dev/null +++ b/src/skillmodels/amn/estimate.py @@ -0,0 +1,176 @@ +"""Top-level orchestration for the three-stage AMN estimator. + +Chains the three stages: + +1. `mixture_em.fit_mixture_em` -> reduced-form Pi, Psi +2. `minimum_distance.solve_minimum_distance` -> structural Lambda, A, Sigma, mu, Omega +3. `simulate_and_regress.simulate_and_regress` -> production-function params + +and merges the resulting parameter pieces into a single skillmodels +params DataFrame. 
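+
+Illustrative usage (assumes a `ModelSpec` and a long-format panel `data`):
+
+    from skillmodels.amn import AMNEstimationOptions, estimate_amn
+
+    result = estimate_amn(model_spec, data, AMNEstimationOptions())
+    params = result.all_params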
+""" + +import optimagic as om +import pandas as pd + +from skillmodels.amn.minimum_distance import solve_minimum_distance +from skillmodels.amn.mixture_em import ( + build_augmented_measure_layout, + build_augmented_measure_matrix, + fit_mixture_em, +) +from skillmodels.amn.simulate_and_regress import simulate_and_regress +from skillmodels.amn.types import ( + AMNEstimationOptions, + AMNEstimationResult, + AMNStageResults, + MinimumDistanceResult, +) +from skillmodels.common.model_spec import ModelSpec +from skillmodels.common.process_model import process_model + + +def _measurement_params_dataframe( + structural: MinimumDistanceResult, +) -> pd.DataFrame: + """Translate Stage 2 outputs into rows of the standard params DataFrame.""" + rows: list[tuple[str, int, str, str, float]] = [] + for idx, row in structural.loadings.iterrows(): + period, meas, factor = idx # ty: ignore[not-iterable] + rows.append( + ("loadings", int(period), str(meas), str(factor), float(row["loading"])) + ) + for idx, row in structural.measurement_intercepts.iterrows(): + period, meas = idx # ty: ignore[not-iterable] + rows.append( + ("controls", int(period), str(meas), "constant", float(row["intercept"])) + ) + for idx, row in structural.measurement_sds.iterrows(): + period, meas = idx # ty: ignore[not-iterable] + rows.append(("meas_sds", int(period), str(meas), "-", float(row["sd"]))) + if not rows: + return pd.DataFrame( + {"value": []}, + index=pd.MultiIndex.from_tuples( + [], names=["category", "aug_period", "name1", "name2"] + ), + ) + index = pd.MultiIndex.from_tuples( + [(c, p, n1, n2) for c, p, n1, n2, _ in rows], + names=["category", "aug_period", "name1", "name2"], + ) + values = [v for *_, v in rows] + return pd.DataFrame({"value": values}, index=index) + + +def _apply_overrides( + params: pd.DataFrame, + *, + fixed_params: pd.DataFrame | None, + start_params: pd.DataFrame | None, +) -> pd.DataFrame: + """Overlay user-supplied fixed_params and start_params on `params`. + + `fixed_params` wins over `start_params`, which wins over the + estimated values. Rows in the overrides not present in `params` are + added; rows in `params` not present in the overrides are kept. + """ + out = params.copy() + if start_params is not None and not start_params.empty: + merged = out.reindex(out.index.union(start_params.index)) + merged.loc[start_params.index, "value"] = start_params["value"] + out = merged + if fixed_params is not None and not fixed_params.empty: + merged = out.reindex(out.index.union(fixed_params.index)) + merged.loc[fixed_params.index, "value"] = fixed_params["value"] + out = merged + return out.sort_index() + + +def estimate_amn( + model_spec: ModelSpec, + data: pd.DataFrame, + amn_options: AMNEstimationOptions | None = None, + start_params: pd.DataFrame | None = None, + fixed_params: pd.DataFrame | None = None, + constraints: list[om.constraints.Constraint] | None = None, +) -> AMNEstimationResult: + """Estimate a latent factor model using the Attanasio-Meghir-Nix method. + + Args: + model_spec: Same model spec used by CHS and AF. + data: Panel dataset in long format with MultiIndex (id, period). + amn_options: AMN-specific options. If None, uses defaults. + start_params: Optional starting parameter values; overlaid on the + estimated combined params DataFrame as well as on Stage 1 EM + starts (the latter not yet wired). + fixed_params: Parameters to pin during estimation. 
Currently + applied as a post-hoc override on the combined params + DataFrame; future revisions may enforce them inside each + stage's optimizer. + constraints: Reserved for forward-compatibility (equality + constraints from optimagic). Not yet honoured inside the AMN + stages; pass-through only. + + Return: + AMNEstimationResult containing per-stage outputs and the combined + params DataFrame. + + """ + del constraints # forward-compat hook; AMN stages do not yet honour these + if amn_options is None: + amn_options = AMNEstimationOptions() + + processed_model = process_model(model_spec) + layout = build_augmented_measure_layout(processed_model) + augmented = build_augmented_measure_matrix(data, processed_model, layout) + + mixture = fit_mixture_em( + augmented, + n_components=amn_options.n_mixture_components, + max_iter=amn_options.em_max_iter, + tol=amn_options.em_tol, + n_init=amn_options.em_n_init, + reg_covar=amn_options.em_reg_covar, + seed=amn_options.seed, + layout=layout, + ) + + structural = solve_minimum_distance( + mixture, + processed_model, + weighting=amn_options.minimum_distance_weighting, + algorithm=amn_options.optimizer_algorithm, + ) + + production = simulate_and_regress( + structural, + processed_model, + model_spec, + mixture_weights=mixture.weights, + n_draws=amn_options.n_simulation_draws, + seed=amn_options.seed, + investment_endogeneity=amn_options.investment_endogeneity, + ) + + measurement = _measurement_params_dataframe(structural) + all_params = pd.concat( + [measurement, production.production_params, production.investment_params] + ).sort_index() + all_params = _apply_overrides( + all_params, fixed_params=fixed_params, start_params=start_params + ) + + success = structural.success and mixture.converged + + return AMNEstimationResult( + model_spec=model_spec, + stages=AMNStageResults( + mixture=mixture, + structural=structural, + production=production, + ), + all_params=all_params, + success=success, + synthetic_panel=None, + ) diff --git a/src/skillmodels/amn/inference.py b/src/skillmodels/amn/inference.py new file mode 100644 index 00000000..2c119c08 --- /dev/null +++ b/src/skillmodels/amn/inference.py @@ -0,0 +1,130 @@ +"""Bootstrap inference for the AMN estimator. + +Cluster (caseid-level) nonparametric bootstrap that re-runs all three +estimation stages on each replicate, mirroring AMN 2020 p. 2523: + + "To estimate confidence intervals and obtain critical values for + test statistics, we use the non-parametric bootstrap over all three + steps." + +Each bootstrap replicate: + +1. Resamples caseids with replacement (size = n_clusters). +2. Calls `estimate_amn` on the resampled panel with the same options. +3. Stores the resulting `all_params` row. + +After `n_boot` replicates, the standard errors are the column-wise std +across replicate parameter vectors, and the covariance is the +column-wise covariance. The first replicate inherits the original +fit's params (resampling is i.i.d.; no need to recompute the point +estimate). 
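+
+Illustrative usage (assumes `result` and `data` from a prior `estimate_amn`
+fit):
+
+    inference = compute_amn_standard_errors(result, data, n_boot=200, seed=1)
+    se = inference.standard_errors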
+""" + +import warnings + +import numpy as np +import pandas as pd + +from skillmodels.amn.estimate import estimate_amn +from skillmodels.amn.types import ( + AMNEstimationOptions, + AMNEstimationResult, + AMNInferenceResult, +) + + +def _resample_by_caseid(data: pd.DataFrame, rng: np.random.Generator) -> pd.DataFrame: + """Draw a caseid bootstrap sample with replacement.""" + case_level = str(data.index.names[0]) + caseids = data.index.get_level_values(case_level).unique() + n = len(caseids) + sampled = caseids[rng.integers(0, n, size=n)] + # Rebuild the panel with fresh sequential caseids so duplicates from + # the bootstrap survive the (caseid, period) uniqueness assumed by + # build_augmented_measure_matrix. + pieces = [] + for new_id, original_id in enumerate(sampled): + block = data.xs(original_id, level=case_level, drop_level=False).copy() + old_periods = block.index.get_level_values(1) + block.index = pd.MultiIndex.from_arrays( + [np.full(len(block), new_id), old_periods], + names=data.index.names, + ) + pieces.append(block) + return pd.concat(pieces) + + +def compute_amn_standard_errors( + result: AMNEstimationResult, + data: pd.DataFrame, + amn_options: AMNEstimationOptions | None = None, + *, + n_boot: int = 1_000, + seed: int = 0, +) -> AMNInferenceResult: + """Cluster-bootstrap standard errors for AMN parameter estimates. + + Args: + result: A fitted `AMNEstimationResult` (used to determine the + parameter index and as a fallback when a replicate fails). + data: Panel dataset used for the original fit. + amn_options: AMN options for replicate estimation. If None, + uses defaults (same as `estimate_amn`). + n_boot: Number of bootstrap replicates. + seed: RNG seed. + + Return: + AMNInferenceResult with replicate-level params, std errors, and + covariance. + + """ + if amn_options is None: + amn_options = AMNEstimationOptions() + + rng = np.random.default_rng(seed) + case_level = str(data.index.names[0]) + caseids = data.index.get_level_values(case_level).unique() + n_clusters = len(caseids) + + base_index = result.all_params.index + replicate_rows: list[pd.Series] = [] + n_failed = 0 + for b in range(n_boot): + boot_data = _resample_by_caseid(data, rng) + try: + boot_result = estimate_amn( + result.model_spec, + boot_data, + amn_options, + ) + row = boot_result.all_params.reindex(base_index)["value"] + except (np.linalg.LinAlgError, ValueError, RuntimeError) as exc: + n_failed += 1 + warnings.warn( + f"AMN bootstrap replicate {b} failed: {exc}", + RuntimeWarning, + stacklevel=2, + ) + row = pd.Series(np.nan, index=base_index) + replicate_rows.append(row) + + replicate_df = pd.DataFrame(replicate_rows).reset_index(drop=True) + replicate_df.columns = base_index + standard_errors = replicate_df.std(axis=0, ddof=1) + vcov = replicate_df.cov(ddof=1) + + if n_failed > 0: + warnings.warn( + f"{n_failed}/{n_boot} AMN bootstrap replicates failed; " + "standard errors may be biased.", + RuntimeWarning, + stacklevel=2, + ) + + return AMNInferenceResult( + standard_errors=standard_errors, + vcov=vcov, + replicate_params=replicate_df, + n_clusters=n_clusters, + n_boot=n_boot, + ) diff --git a/src/skillmodels/amn/minimum_distance.py b/src/skillmodels/amn/minimum_distance.py new file mode 100644 index 00000000..b73b0073 --- /dev/null +++ b/src/skillmodels/amn/minimum_distance.py @@ -0,0 +1,559 @@ +"""Stage 2 of the AMN estimator: structural recovery via minimum distance. 
+ +Takes the reduced-form mixture parameters (Pi_k, Psi_k) from Stage 1 +(`skillmodels.amn.mixture_em`) and recovers the structural parameters +(Lambda, A, Sigma, mu_k, Omega_k) subject to the AMN-paper constraint +structure (eq. 12-13): factor-measurement zero pattern in Lambda, +age-invariance for time-invariant factors, scale normalization +(lambda=1 on the reference measure per factor), and the period-0 +mean-zero restriction. + +Mirrors `STEP2_func.R` from the AMN 2020 supplementary archive: a +packed-parameter L-BFGS-B optimizer over the sum-of-squares distance +between the EM-fitted moments (Pi_m, Psi_m) and the model-implied +moments parameterized by structural quantities. +""" + +from dataclasses import dataclass + +import numpy as np +import optimagic as om +import pandas as pd + +from skillmodels.amn.types import ( + AugmentedMeasureLayout, + MinimumDistanceResult, + MixtureFitResult, +) +from skillmodels.common.types import ProcessedModel + + +@dataclass(frozen=True) +class _Structure: + """Pre-computed structural layout for minimum-distance recovery. + + Carries the slot-to-factor-period mapping plus all the + free/normalized/zero masks needed by the optimizer. + """ + + factor_period_slots: tuple[tuple[int, str], ...] + """Ordered (period, factor_name) for the structural mu / Omega columns. + Latent and observed-factor / control slots are all included.""" + + n_factor_slots: int + """``len(factor_period_slots)``.""" + + n_aug: int + """Number of rows in the augmented measure vector.""" + + lambda_value: np.ndarray + """Initial Lambda matrix (zeros + normalized 1s where pinned).""" + + lambda_free_mask: np.ndarray + """Boolean (n_aug, n_factor_slots): True where Lambda is free.""" + + intercept_value: np.ndarray + """Initial intercept vector (zeros + normalized values where pinned).""" + + intercept_free_mask: np.ndarray + """Boolean (n_aug,): True where the intercept is free.""" + + sigma2_free_mask: np.ndarray + """Boolean (n_aug,): True where the measurement-error variance is free. + False for observed-factor / control slots (zero by construction).""" + + baseline_mean_zero_slots: tuple[int, ...] + """Indices into ``factor_period_slots`` for which the K-th mixture's + mean is determined by the tau-weighted sum-to-zero constraint + (AMN eq. 13). Typically the period-0 latent-factor slots.""" + + +def _build_structure( # noqa: C901, PLR0912, PLR0915 + layout: AugmentedMeasureLayout, + processed_model: ProcessedModel, +) -> _Structure: + """Translate the augmented layout into per-Lambda/A/Sigma constraint masks. + + For each augmented slot, decides which structural factor-period column + it loads on, and whether its Lambda / A / Sigma entries are free + (estimated) or pinned (normalized or zero by construction). + """ + n_aug = len(layout.columns) + normalizations = processed_model.normalizations + aug_to_period = processed_model.labels.aug_periods_to_periods + observed_factor_names = processed_model.labels.observed_factors + + # Collect factor-period slots: one per (period, factor) that actually + # has at least one row loading on it (latent measurements) OR is the + # "self-slot" of an observed factor / control augmented row. 
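+    # (Illustrative, with hypothetical factor and control names: slots could
+    # end up as [(0, "cog"), (1, "cog"), (0, "mother_edu"), (-1, "urban")],
+    # where -1 marks the collapsed period of a time-invariant control.)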
+ slots: list[tuple[int, str]] = [] + slot_index: dict[tuple[int, str], int] = {} + for _slot, (period, factor, _meas) in zip( + layout.measurement_slots, layout.measurement_meta, strict=True + ): + key = (period, factor) + if key not in slot_index: + slot_index[key] = len(slots) + slots.append(key) + for _slot, (period, of_name) in zip( + layout.observed_factor_slots, layout.observed_factor_meta, strict=True + ): + key = (period, of_name) + if key not in slot_index: + slot_index[key] = len(slots) + slots.append(key) + for ctrl in layout.control_meta: + # Controls collapse to a single period (-1 = time-invariant marker). + key = (-1, ctrl) + if key not in slot_index: + slot_index[key] = len(slots) + slots.append(key) + + n_slots = len(slots) + lambda_value = np.zeros((n_aug, n_slots)) + lambda_free_mask = np.zeros((n_aug, n_slots), dtype=bool) + intercept_value = np.zeros(n_aug) + intercept_free_mask = np.zeros(n_aug, dtype=bool) + sigma2_free_mask = np.zeros(n_aug, dtype=bool) + + # Latent-factor measurement slots. + for aug_idx, (period, factor, meas_name) in zip( + layout.measurement_slots, layout.measurement_meta, strict=True + ): + sigma2_free_mask[aug_idx] = True + col = slot_index[(period, factor)] + # Determine whether the loading at this (period, factor, meas) is + # normalized (typically the "first" measurement per factor) or + # free. Skillmodels stores normalizations per aug_period; walk the + # aug_periods that map to this calendar period and inspect them. + loading_normalized = False + intercept_normalized = False + loading_norm_value = 1.0 + intercept_norm_value = 0.0 + if factor in normalizations: + for aug_period, cal_period in aug_to_period.items(): + if int(cal_period) != int(period): + continue + load_map = normalizations[factor].loadings[aug_period] + int_map = normalizations[factor].intercepts[aug_period] + if meas_name in load_map: + loading_normalized = True + loading_norm_value = float(load_map[meas_name]) + if meas_name in int_map: + intercept_normalized = True + intercept_norm_value = float(int_map[meas_name]) + if loading_normalized: + lambda_value[aug_idx, col] = loading_norm_value + else: + lambda_free_mask[aug_idx, col] = True + if intercept_normalized: + intercept_value[aug_idx] = intercept_norm_value + else: + intercept_free_mask[aug_idx] = True + + # Observed-factor slots: load on their own column with lambda=1, + # sigma=0 (perfectly observed); intercept is free (the mixture mean + # shifts the slot). + for aug_idx, (period, of_name) in zip( + layout.observed_factor_slots, layout.observed_factor_meta, strict=True + ): + col = slot_index[(period, of_name)] + lambda_value[aug_idx, col] = 1.0 + # sigma2 stays False (pinned to zero by construction). + intercept_free_mask[aug_idx] = True + + # Control slots: same pattern as observed factors (lambda=1, sigma=0). + for aug_idx, ctrl in zip(layout.control_slots, layout.control_meta, strict=True): + col = slot_index[(-1, ctrl)] + lambda_value[aug_idx, col] = 1.0 + intercept_free_mask[aug_idx] = True + + del observed_factor_names + + # Mean-zero baseline: period-0 latent-factor slots get pinned by the + # tau-weighted sum-to-zero constraint. Observed factors / controls + # have free means (no normalization needed; they're directly + # observed). 
+ latent_factor_names = set(processed_model.labels.latent_factors) + baseline_slot_ids = tuple( + slot_index[(p, f)] for (p, f) in slots if p == 0 and f in latent_factor_names + ) + + return _Structure( + factor_period_slots=tuple(slots), + n_factor_slots=n_slots, + n_aug=n_aug, + lambda_value=lambda_value, + lambda_free_mask=lambda_free_mask, + intercept_value=intercept_value, + intercept_free_mask=intercept_free_mask, + sigma2_free_mask=sigma2_free_mask, + baseline_mean_zero_slots=baseline_slot_ids, + ) + + +def _pack_layout(struct: _Structure, n_components: int) -> tuple[int, dict[str, slice]]: + """Decide the layout of the flat optimizer parameter vector. + + Returns: + ------- + n_total + Total length of the parameter vector. + slices + Mapping from parameter section name to a `slice` into the flat + vector. Sections: + + - ``"sigma2"`` -- free entries of the measurement-error + variances. + - ``"chol_"`` for ``m`` in 0..n_components-1 -- lower-tri + Cholesky elements of Omega_m, packed row-major. + - ``"mu_"`` for ``m`` in 0..n_components-2 -- the free + entries of mu_m (i.e. excluding the K-th mixture, which is + determined by the mean-zero constraint at baseline slots + and by free params elsewhere... actually we still free + mu_K at non-baseline slots; only the baseline slots of + mu_K are derived). + """ + slices: dict[str, slice] = {} + cursor = 0 + + n_sigma2_free = int(struct.sigma2_free_mask.sum()) + slices["sigma2"] = slice(cursor, cursor + n_sigma2_free) + cursor += n_sigma2_free + + n_factor = struct.n_factor_slots + n_chol_per = n_factor * (n_factor + 1) // 2 + for m in range(n_components): + slices[f"chol_{m}"] = slice(cursor, cursor + n_chol_per) + cursor += n_chol_per + + n_baseline = len(struct.baseline_mean_zero_slots) + # mu has shape (n_components, n_factor); for the K-th mixture, the + # baseline_mean_zero_slots are determined => those are excluded. + n_mu_free = n_components * n_factor - n_baseline + slices["mu"] = slice(cursor, cursor + n_mu_free) + cursor += n_mu_free + + n_lambda_free = int(struct.lambda_free_mask.sum()) + slices["lambda"] = slice(cursor, cursor + n_lambda_free) + cursor += n_lambda_free + + n_intercept_free = int(struct.intercept_free_mask.sum()) + slices["intercept"] = slice(cursor, cursor + n_intercept_free) + cursor += n_intercept_free + + return cursor, slices + + +def _unpack( + flat: np.ndarray, + struct: _Structure, + slices: dict[str, slice], + *, + n_components: int, + mixture_weights: np.ndarray, +) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]: + """Decode a flat parameter vector into (sigma2, Omega, mu, Lambda, A). + + Applies the tau-weighted mean-zero constraint to mu_K at the + baseline_mean_zero_slots. 
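+
+    Concretely, for each baseline slot j the last component's mean is derived
+    as mu_K[j] = -sum_{m<K} tau_m * mu_m[j] / tau_K, so that
+    sum_m tau_m * mu_m[j] = 0.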
+ """ + n_factor = struct.n_factor_slots + + sigma2 = np.zeros(struct.n_aug) + sigma2[struct.sigma2_free_mask] = flat[slices["sigma2"]] + + omegas = np.zeros((n_components, n_factor, n_factor)) + tril_rows, tril_cols = np.tril_indices(n_factor) + for m in range(n_components): + chol = np.zeros((n_factor, n_factor)) + chol[tril_rows, tril_cols] = flat[slices[f"chol_{m}"]] + omegas[m] = chol @ chol.T + + mu = np.zeros((n_components, n_factor)) + baseline_set = set(struct.baseline_mean_zero_slots) + free_mu_positions: list[tuple[int, int]] = [] + for m in range(n_components): + is_last = m == n_components - 1 + for j in range(n_factor): + if is_last and j in baseline_set: + continue + free_mu_positions.append((m, j)) + mu_values = flat[slices["mu"]] + for (m, j), val in zip(free_mu_positions, mu_values, strict=True): + mu[m, j] = val + # Enforce mean-zero at baseline slots for the last mixture. + if baseline_set and n_components > 1: + for j in struct.baseline_mean_zero_slots: + num = -np.sum(mixture_weights[:-1] * mu[:-1, j]) + mu[-1, j] = num / mixture_weights[-1] + + lambda_mat = struct.lambda_value.copy() + lambda_mat[struct.lambda_free_mask] = flat[slices["lambda"]] + + intercept = struct.intercept_value.copy() + intercept[struct.intercept_free_mask] = flat[slices["intercept"]] + + return sigma2, omegas, mu, lambda_mat, intercept + + +def _model_implied_moments( + sigma2: np.ndarray, + omegas: np.ndarray, + mu: np.ndarray, + lambda_mat: np.ndarray, + intercept: np.ndarray, +) -> tuple[np.ndarray, np.ndarray]: + """Compute (per-component mean, cov) implied by the structural params. + + Returns shapes ``(K, n_aug)`` and ``(K, n_aug, n_aug)`` respectively. + """ + n_components = omegas.shape[0] + n_aug = intercept.shape[0] + means = np.empty((n_components, n_aug)) + covs = np.empty((n_components, n_aug, n_aug)) + diag_sigma2 = np.diag(sigma2) + for m in range(n_components): + means[m] = intercept + lambda_mat @ mu[m] + covs[m] = lambda_mat @ omegas[m] @ lambda_mat.T + diag_sigma2 + return means, covs + + +def _objective( + flat: np.ndarray, + struct: _Structure, + slices: dict[str, slice], + *, + n_components: int, + mixture_weights: np.ndarray, + target_means: np.ndarray, + target_covs: np.ndarray, +) -> float: + sigma2, omegas, mu, lam, inter = _unpack( + flat, + struct, + slices, + n_components=n_components, + mixture_weights=mixture_weights, + ) + pred_means, pred_covs = _model_implied_moments(sigma2, omegas, mu, lam, inter) + diff_mean = pred_means - target_means + diff_cov = pred_covs - target_covs + return float(np.sum(diff_mean**2) + np.sum(diff_cov**2)) + + +def _initial_guess( + struct: _Structure, + slices: dict[str, slice], + *, + n_components: int, + n_total: int, + target_means: np.ndarray, # noqa: ARG001 + target_covs: np.ndarray, +) -> np.ndarray: + """Build a sensible starting vector from the EM moments. + + Seeds sigma^2 from the average diagonal of the EM covariances scaled + down by 0.5 (so factors keep at least half the explained variance); + seeds each Omega Cholesky from the cholesky of the average EM + covariance restricted to the factor-period block; seeds mu_m from + the EM means at the corresponding slot identities. + """ + flat = np.zeros(n_total) + + diag_avg = np.mean(np.diagonal(target_covs, axis1=1, axis2=2), axis=0) + sigma2_guess = 0.25 * np.clip(diag_avg, 1e-3, None) + flat[slices["sigma2"]] = sigma2_guess[struct.sigma2_free_mask] + + # Project the average EM covariance onto a roughly diagonal Omega in + # the factor-period basis. 
For v0 we use the identity rescaled by + # the average non-error variance trace; this is a safe, well-defined + # start. + avg_factor_var = np.maximum(diag_avg.mean() * 0.5, 1e-2) + n_factor = struct.n_factor_slots + init_chol = np.sqrt(avg_factor_var) * np.eye(n_factor) + tril_rows, tril_cols = np.tril_indices(n_factor) + init_chol_vec = init_chol[tril_rows, tril_cols] + for m in range(n_components): + flat[slices[f"chol_{m}"]] = init_chol_vec + + # Seed mu_m from each EM component's projection onto the slot space + # via least-squares (lambda_value pseudo-inverse on the centered + # means). For v0 use a simpler heuristic: spread the EM means across + # mixtures using a small dispersion around zero. + flat[slices["mu"]] = 0.0 + + flat[slices["lambda"]] = 1.0 + flat[slices["intercept"]] = 0.0 + return flat + + +def _lower_bounds( + struct: _Structure, # noqa: ARG001 + slices: dict[str, slice], + n_total: int, +) -> np.ndarray: + bounds = np.full(n_total, -np.inf) + bounds[slices["sigma2"]] = 1e-8 + return bounds + + +def solve_minimum_distance( + mixture: MixtureFitResult, + processed_model: ProcessedModel, + *, + weighting: str = "identity", + algorithm: str = "scipy_lbfgsb", +) -> MinimumDistanceResult: + """Recover structural parameters from the reduced-form mixture. + + Args: + mixture: Stage 1 fit (reduced-form Pi, Psi per component). + processed_model: Skillmodels processed model (provides normalization + and constraint structure). + weighting: ``"identity"`` (default, fast) or ``"optimal"`` + (uses an Avar estimate of the EM moments). + algorithm: optimagic algorithm name (default ``scipy_lbfgsb``). + + Return: + MinimumDistanceResult with structural Lambda, A, Sigma, and the + per-component factor means and covariances. + + """ + if weighting not in ("identity", "optimal"): + msg = f"Unknown weighting '{weighting}'." + raise ValueError(msg) + if weighting == "optimal": + msg = "Optimal weighting not yet implemented; use 'identity'." + raise NotImplementedError(msg) + + layout = mixture.layout + if not layout.measurement_slots and not layout.observed_factor_slots: + msg = "Mixture layout has no slots; cannot run minimum distance." 
+ raise ValueError(msg) + + struct = _build_structure(layout, processed_model) + n_components = mixture.weights.shape[0] + n_total, slices = _pack_layout(struct, n_components) + + target_means = mixture.means.copy() + target_covs = mixture.covariances.copy() + + flat0 = _initial_guess( + struct, + slices, + n_components=n_components, + n_total=n_total, + target_means=target_means, + target_covs=target_covs, + ) + lower = _lower_bounds(struct, slices, n_total) + + def fun(theta: np.ndarray) -> float: + return _objective( + theta, + struct, + slices, + n_components=n_components, + mixture_weights=mixture.weights, + target_means=target_means, + target_covs=target_covs, + ) + + result = om.minimize( + fun=fun, + params=flat0, + algorithm=algorithm, + bounds=om.Bounds(lower=lower), + ) + success = bool(result.success) + flat_opt = np.asarray(result.params, dtype=float) + sigma2, omegas, mu, lambda_mat, intercept = _unpack( + flat_opt, + struct, + slices, + n_components=n_components, + mixture_weights=mixture.weights, + ) + + loadings_df = _loadings_dataframe(struct, layout, lambda_mat) + intercepts_df = _intercepts_dataframe(layout, intercept) + meas_sds_df = _meas_sds_dataframe(layout, np.sqrt(np.clip(sigma2, 0.0, None))) + + return MinimumDistanceResult( + loadings=loadings_df, + measurement_intercepts=intercepts_df, + measurement_sds=meas_sds_df, + factor_mixture_means=mu, + factor_mixture_covariances=omegas, + factor_period_slots=struct.factor_period_slots, + objective_value=float(result.fun), + success=success, + ) + + +def _loadings_dataframe( + struct: _Structure, + layout: AugmentedMeasureLayout, + lambda_mat: np.ndarray, +) -> pd.DataFrame: + """Return a long-format Lambda DataFrame, one row per nonzero entry.""" + rows = [] + aug_idx_to_meta: dict[int, tuple[int, str, str]] = dict( + zip(layout.measurement_slots, layout.measurement_meta, strict=True) + ) + slot_to_id = {sp: i for i, sp in enumerate(struct.factor_period_slots)} + for slot, meta in aug_idx_to_meta.items(): + period, factor, meas = meta + col = slot_to_id[(period, factor)] + rows.append( + { + "period": period, + "measurement": meas, + "factor": factor, + "loading": float(lambda_mat[slot, col]), + } + ) + if not rows: + return pd.DataFrame( + columns=["period", "measurement", "factor", "loading"] + ).set_index(["period", "measurement", "factor"]) + return pd.DataFrame(rows).set_index(["period", "measurement", "factor"]) + + +def _intercepts_dataframe( + layout: AugmentedMeasureLayout, + intercept: np.ndarray, +) -> pd.DataFrame: + rows = [] + for slot, (period, _factor, meas) in zip( + layout.measurement_slots, layout.measurement_meta, strict=True + ): + rows.append( + { + "period": period, + "measurement": meas, + "intercept": float(intercept[slot]), + } + ) + if not rows: + return pd.DataFrame(columns=["period", "measurement", "intercept"]).set_index( + ["period", "measurement"] + ) + return pd.DataFrame(rows).set_index(["period", "measurement"]) + + +def _meas_sds_dataframe( + layout: AugmentedMeasureLayout, + sds: np.ndarray, +) -> pd.DataFrame: + rows = [] + for slot, (period, _factor, meas) in zip( + layout.measurement_slots, layout.measurement_meta, strict=True + ): + rows.append({"period": period, "measurement": meas, "sd": float(sds[slot])}) + if not rows: + return pd.DataFrame(columns=["period", "measurement", "sd"]).set_index( + ["period", "measurement"] + ) + return pd.DataFrame(rows).set_index(["period", "measurement"]) diff --git a/src/skillmodels/amn/mixture_em.py b/src/skillmodels/amn/mixture_em.py new 
file mode 100644 index 00000000..7c09303a --- /dev/null +++ b/src/skillmodels/amn/mixture_em.py @@ -0,0 +1,302 @@ +"""Stage 1 of the AMN estimator: mixture-of-normals EM on augmented measurements. + +Fits + + F_{M,X} = sum_k tau_k * Normal(Pi_k, Psi_k) + +to the joint vector of (factor measurements, observed factor values, +controls) across all periods. Matches AMN 2020 equations (11)-(14). + +The fitted mixture is the reduced-form input to Stage 2's structural +minimum-distance recovery (`skillmodels.amn.minimum_distance`). +""" + +from collections.abc import Mapping +from itertools import chain + +import numpy as np +import pandas as pd +from sklearn.mixture import GaussianMixture + +from skillmodels.amn.types import AugmentedMeasureLayout, MixtureFitResult +from skillmodels.common.types import ProcessedModel + + +def build_augmented_measure_layout( + processed_model: ProcessedModel, +) -> AugmentedMeasureLayout: + """Compute the column layout of the augmented measure vector. + + The augmented vector concatenates, in order: + + 1. Factor measurements at each period (one slot per `(period, + measurement)` row of `processed_model.update_info`). + 2. Observed factor values at each period (one slot per `(period, + observed_factor)` pair). + 3. Controls at the first period (treated as time-invariant; one slot + per non-constant control). + + Slots 2 and 3 are treated as zero-measurement-error observations + with loading 1 in the AMN measurement-system mapping (paper p. 2522: + "we set the corresponding standard deviation in Sigma to zero and + the corresponding factor loading to one"). + + Args: + processed_model: The output of `common.process_model.process_model`. + + Return: + AugmentedMeasureLayout with slot metadata for downstream Stage 2 + bookkeeping. + + """ + update_info = processed_model.update_info + periods = processed_model.labels.periods + aug_to_period = processed_model.labels.aug_periods_to_periods + observed_factors = processed_model.labels.observed_factors + controls = tuple(c for c in processed_model.labels.controls if c != "constant") + + columns: list[str] = [] + measurement_slots: list[int] = [] + measurement_meta: list[tuple[int, str, str]] = [] + observed_factor_slots: list[int] = [] + observed_factor_meta: list[tuple[int, str]] = [] + control_slots: list[int] = [] + + # Walk update_info rows in canonical (aug_period, measurement) order. + # Each row is one measurement update; map aug_period -> calendar period + # via labels.aug_periods_to_periods so the layout metadata is in + # AMN-paper terms (calendar period). 
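+    # Hypothetical illustration of the resulting column order for a
+    # two-period model with latent factors fac1/fac2, one observed factor
+    # ob1, and one control x1 (all names made up):
+    #   meas[0|fac1|y1], meas[0|fac2|y2], ..., meas[1|fac2|y6],
+    #   obs_factor[0|ob1], obs_factor[1|ob1], control[x1]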
+ factor_columns = [c for c in update_info.columns if c != "purpose"] + for index, row in update_info.iterrows(): + aug_period, meas_name = index # ty: ignore[not-iterable] + purpose = row.get("purpose", "measurement") + if purpose != "measurement": + continue + loadings = row[factor_columns].astype(bool) + if not loadings.any(): + continue + factor = next(f for f in factor_columns if loadings[f]) + period = int(aug_to_period[int(aug_period)]) + slot = len(columns) + columns.append(f"meas[{period}|{factor}|{meas_name}]") + measurement_slots.append(slot) + measurement_meta.append((period, str(factor), str(meas_name))) + + for period in periods: + for of in observed_factors: + slot = len(columns) + columns.append(f"obs_factor[{period}|{of}]") + observed_factor_slots.append(slot) + observed_factor_meta.append((int(period), str(of))) + + for ctrl in controls: + slot = len(columns) + columns.append(f"control[{ctrl}]") + control_slots.append(slot) + + return AugmentedMeasureLayout( + columns=tuple(columns), + measurement_slots=tuple(measurement_slots), + observed_factor_slots=tuple(observed_factor_slots), + control_slots=tuple(control_slots), + measurement_meta=tuple(measurement_meta), + observed_factor_meta=tuple(observed_factor_meta), + control_meta=tuple(controls), + ) + + +def _build_period_views( + data: pd.DataFrame, + periods: tuple[int, ...], + period_level: str, + caseids: pd.Index, +) -> dict[int, pd.DataFrame]: + """Return one (n_obs, n_cols) DataFrame per period, reindexed by caseids.""" + period_views: dict[int, pd.DataFrame] = {} + for period in periods: + sub = data.xs(period, level=period_level, drop_level=True) + if isinstance(sub, pd.Series): + sub = sub.to_frame() + sub = sub.reindex(caseids) + period_views[int(period)] = sub + return period_views + + +def _fill_controls( + out: np.ndarray, + period_views: dict[int, pd.DataFrame], + layout: AugmentedMeasureLayout, + periods: tuple[int, ...], +) -> None: + """Fill control slots from the first period each control is observed in.""" + for slot, ctrl in zip(layout.control_slots, layout.control_meta, strict=True): + for period in periods: + sub = period_views[int(period)] + if ctrl not in sub.columns: + continue + col = sub[ctrl].to_numpy() + mask = np.isnan(out[:, slot]) + out[mask, slot] = col[mask] + + +def build_augmented_measure_matrix( + data: pd.DataFrame, + processed_model: ProcessedModel, + layout: AugmentedMeasureLayout, +) -> np.ndarray: + """Stack each child's augmented measure vector into an ``(n_obs, n_aug)`` matrix. + + Reshapes the long-format `data` into one row per individual (caseid), + pulling the right column for each layout slot from the corresponding + period. + + Args: + data: Panel dataset in long format with MultiIndex + ``(caseid, period)``. + processed_model: Output of `process_model.process_model`. + layout: Slot layout for the augmented vector. + + Return: + ``(n_obs, n_aug)`` numpy array. Missing values are NaN. + + """ + if not isinstance(data.index, pd.MultiIndex) or data.index.nlevels < 2: + msg = "data must have a 2-level MultiIndex (caseid, period)." 
+ raise ValueError(msg) + period_level = str(data.index.names[1]) + case_level = str(data.index.names[0]) + + caseids = data.index.get_level_values(case_level).unique() + n_obs = len(caseids) + n_aug = len(layout.columns) + out = np.full((n_obs, n_aug), np.nan) + + periods = processed_model.labels.periods + period_views = _build_period_views(data, periods, period_level, caseids) + + for slot, (period, _factor, meas_name) in zip( + layout.measurement_slots, layout.measurement_meta, strict=True + ): + sub = period_views[period] + if meas_name in sub.columns: + out[:, slot] = sub[meas_name].to_numpy() + + for slot, (period, of_name) in zip( + layout.observed_factor_slots, layout.observed_factor_meta, strict=True + ): + sub = period_views[period] + if of_name in sub.columns: + out[:, slot] = sub[of_name].to_numpy() + + if layout.control_slots: + _fill_controls(out, period_views, layout, periods) + + return out + + +def fit_mixture_em( + augmented: np.ndarray, + *, + n_components: int, + max_iter: int = 500, + tol: float = 1e-6, + n_init: int = 5, + reg_covar: float = 1e-6, + seed: int = 0, + layout: AugmentedMeasureLayout | None = None, + init_params: Mapping[str, np.ndarray] | None = None, +) -> MixtureFitResult: + """Fit a Gaussian mixture to the augmented measure matrix via EM. + + Uses `sklearn.mixture.GaussianMixture` under the hood with k-means + initialization and multiple restarts. Rows containing any NaN are + dropped before fitting (listwise complete-case); a future revision + will integrate over missing dimensions in the E-step. + + Args: + augmented: ``(n_obs, n_aug)`` augmented measure matrix from + `build_augmented_measure_matrix`. + n_components: Number of mixture components K. + max_iter: Maximum EM iterations per restart. + tol: Log-likelihood convergence tolerance. + n_init: Number of EM restarts; the best fit is kept. + reg_covar: Diagonal ridge added to each component covariance for + numerical stability. + seed: RNG seed. + layout: Slot layout to embed in the result (carried through to + Stage 2). + init_params: Optional warm-start values. Currently unused — kept + for forward-compatibility with a custom Spearman-seeded init + once Stage 1 results from the moment-init pipeline become + available as warm starts. + + Return: + MixtureFitResult holding the fitted weights, means, covariances + and convergence diagnostics. + + """ + del init_params # reserved for follow-up + if augmented.ndim != 2: + msg = "augmented must be a 2D array." + raise ValueError(msg) + if augmented.shape[0] == 0: + msg = "augmented has zero rows; cannot fit mixture." + raise ValueError(msg) + + complete_mask = ~np.isnan(augmented).any(axis=1) + n_complete = int(complete_mask.sum()) + if n_complete < n_components: + msg = ( + f"Only {n_complete} complete-case rows available for " + f"{n_components}-component mixture." + ) + raise ValueError(msg) + fit_data = augmented[complete_mask] + + gm = GaussianMixture( + n_components=n_components, + covariance_type="full", + max_iter=max_iter, + tol=tol, + n_init=n_init, + reg_covar=reg_covar, + init_params="kmeans", + random_state=seed, + ) + gm.fit(fit_data) + + if layout is None: + # Caller didn't supply a layout; synthesize a minimal one purely + # from column indices so downstream code that doesn't need slot + # metadata still works. 
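+        # Note: the synthesized layout carries no measurement_meta, so it is
+        # enough for bookkeeping here but not for Stage 2's minimum distance,
+        # which pairs each slot with its (period, factor, measurement) metadata.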
+ n_aug = augmented.shape[1] + layout = AugmentedMeasureLayout( + columns=tuple(f"col[{i}]" for i in range(n_aug)), + measurement_slots=tuple(range(n_aug)), + observed_factor_slots=(), + control_slots=(), + measurement_meta=(), + observed_factor_meta=(), + control_meta=(), + ) + + return MixtureFitResult( + weights=np.asarray(gm.weights_, dtype=float), + means=np.asarray(gm.means_, dtype=float), + covariances=np.asarray(gm.covariances_, dtype=float), + loglikelihood=float(gm.score(fit_data) * n_complete), + n_iter=int(gm.n_iter_), + converged=bool(gm.converged_), + layout=layout, + ) + + +def _all_slot_ids(layout: AugmentedMeasureLayout) -> tuple[int, ...]: + """Return the union of all slot id tuples in canonical order.""" + return tuple( + chain( + layout.measurement_slots, + layout.observed_factor_slots, + layout.control_slots, + ) + ) diff --git a/src/skillmodels/amn/moments.py b/src/skillmodels/amn/moments.py new file mode 100644 index 00000000..7d46883d --- /dev/null +++ b/src/skillmodels/amn/moments.py @@ -0,0 +1,301 @@ +"""Spearman / multi-indicator moment estimators for starting values. + +Pure NumPy helpers used to seed optimizer starting values from data +moments instead of static defaults (sigma_inv = 0.5 etc.). They derive +loadings, measurement-error SDs, and latent-factor variances from the +cross-covariance structure of multi-indicator measurements — the +standard Spearman / factor-analysis identification. + +Used by both the AF estimator (chain-wide moment seeds in +`af.initial_period` / `af.transition_period`) and the CHS estimator +(via `skillmodels.amn.start_values.get_spearman_start_params`). + +This module is called once before optimization (no JAX dependency) and +exposes single-pass, robust estimators with floor clamps for numerical +edge cases. +""" + +from dataclasses import dataclass + +import numpy as np + + +@dataclass(frozen=True) +class SpearmanResult: + """Single-factor Spearman moment estimates from cross-covariances.""" + + loadings: np.ndarray + """Recovered loadings, shape ``(n_meas,)``. The anchor entry equals 1.0 + by construction (or the user-provided anchor value).""" + + meas_sds: np.ndarray + """Recovered measurement-error SDs, shape ``(n_meas,)``.""" + + latent_var: float + """Recovered latent-factor variance Var(F).""" + + valid: bool + """False when identification fails (anchor uncorrelated with all other + measurements, or fewer than two measurements available).""" + + +def spearman_factor_moments( + measurements: np.ndarray, + *, + anchor_idx: int = 0, + anchor_loading: float = 1.0, + sd_floor: float = 1e-3, + var_floor: float = 1e-6, +) -> SpearmanResult: + """Recover loadings, sigma_meas, Var(F) from multi-indicator covariances. + + For a single latent factor F observed via ``measurements[:, k] = λ_k F + + ε_k`` (after residualizing out controls), the off-diagonal covariances + identify the loadings up to scale and the diagonal residual variances + give sigma_meas². Anchor measurement ``anchor_idx`` is normalized so its + loading equals ``anchor_loading``. + + Algorithm (pairwise complete cases): + + * ``S = pairwise_cov(measurements)``. + * Pool ``Var(F)`` via robust median across triples ``S[a,j] S[a,k] / + S[j,k]`` for ``j ≠ k ≠ a``. + * ``λ_k = S[a, k] / Var(F)`` for ``k ≠ a`` (then rescaled so anchor + matches ``anchor_loading``). + * ``sigma_meas_k² = max(S[k, k] - λ_k² Var(F), sd_floor²)``. + + If the anchor's covariances with all other measurements are below + numerical noise, rotate to a different anchor and retry. 
If all + candidates fail, return ``valid=False``. + + Args: + measurements: Shape ``(n_obs, n_meas)``. NaN values are handled via + pairwise-complete cases. + anchor_idx: Index of the anchor measurement. Loadings are reported + on a scale where ``loadings[anchor_idx] == anchor_loading``. + anchor_loading: Pinned anchor loading (typically 1.0 from a + normalization). + sd_floor: Minimum returned measurement SD to avoid zero / negative + estimates from sample noise. + var_floor: Minimum returned latent variance. + + Return: + `SpearmanResult` with recovered loadings, sigma_meas, latent_var, and a + `valid` flag. + + """ + arr = np.asarray(measurements, dtype=float) + if arr.ndim != 2: + msg = f"measurements must be 2D; got shape {arr.shape}" + raise ValueError(msg) + n_meas = arr.shape[1] + if n_meas < 2: + return SpearmanResult( + loadings=np.full(n_meas, anchor_loading), + meas_sds=np.full(n_meas, sd_floor), + latent_var=var_floor, + valid=False, + ) + + s = _pairwise_cov(arr) + + # Try the requested anchor first; rotate through other candidates if + # it has no usable cross-covariances. + anchor_order = [anchor_idx, *(k for k in range(n_meas) if k != anchor_idx)] + for candidate in anchor_order: + result = _spearman_with_anchor( + s, + anchor=candidate, + anchor_loading=anchor_loading, + target_anchor=anchor_idx, + sd_floor=sd_floor, + var_floor=var_floor, + ) + if result is not None: + return result + + return SpearmanResult( + loadings=np.full(n_meas, anchor_loading), + meas_sds=np.full(n_meas, sd_floor), + latent_var=var_floor, + valid=False, + ) + + +def derive_unexplained_sd( + latent_var: float, + beta: np.ndarray, + prev_state_cov: np.ndarray, + *, + sd_floor: float = 1e-3, +) -> float: + """Return the residual SD of a regression with explained variance β'Σβ. + + Given a regression ``F = β'·prev_state + ε`` where ``Var(prev_state) = + Σ`` and ``Var(F) = latent_var``, the residual variance is ``Var(ε) = + Var(F) - β'Σβ``. Clamped at ``sd_floor`` to avoid NaN when sample noise + pushes ``β'Σβ`` above ``Var(F)``. + + Used to seed sigma_shock (production shock SD) and sigma_inv (investment shock + SD) from the latent factor variance plus the regression coefficients. + + Args: + latent_var: Marginal variance of the dependent factor. + beta: Regression coefficients, shape ``(n_state,)``. + prev_state_cov: Covariance matrix of the regressors, shape + ``(n_state, n_state)``. + sd_floor: Minimum returned SD. + + Return: + ``sqrt(max(latent_var - β'Σβ, sd_floor²))``. + + """ + beta = np.asarray(beta, dtype=float).ravel() + cov = np.asarray(prev_state_cov, dtype=float) + explained = float(beta @ cov @ beta) + residual_var = max(float(latent_var) - explained, sd_floor**2) + return float(np.sqrt(residual_var)) + + +def seed_beta_from_ols( + response: np.ndarray, + regressors: np.ndarray, +) -> np.ndarray: + """OLS coefficient estimate for seeding inv-equation β. + + Pure-numpy OLS of ``response`` (n_obs,) on ``regressors`` (n_obs, + n_features). Drops rows with any NaN. Returns zeros when the design + is rank-deficient. + + Args: + response: Shape ``(n_obs,)``. + regressors: Shape ``(n_obs, n_features)``. + + Return: + β estimate, shape ``(n_features,)``. Zero vector if the design is + rank-deficient or the sample is too small. 
+ + """ + y = np.asarray(response, dtype=float).ravel() + x = np.asarray(regressors, dtype=float) + if x.ndim == 1: + x = x[:, None] + n_features = x.shape[1] + mask = np.isfinite(y) & np.all(np.isfinite(x), axis=1) + if mask.sum() <= n_features: + return np.zeros(n_features) + try: + coef, *_ = np.linalg.lstsq(x[mask], y[mask], rcond=None) + except np.linalg.LinAlgError: + return np.zeros(n_features) + if not np.all(np.isfinite(coef)): + return np.zeros(n_features) + return coef + + +def _pairwise_cov(arr: np.ndarray) -> np.ndarray: + """Compute pairwise-complete sample covariance matrix. + + Each entry ``S[i, j]`` is the sample covariance over rows where both + columns ``i`` and ``j`` are finite. Diagonal entries are sample + variances over rows where the column is finite. + """ + n_meas = arr.shape[1] + s = np.zeros((n_meas, n_meas)) + finite = np.isfinite(arr) + for i in range(n_meas): + for j in range(i, n_meas): + mask = finite[:, i] & finite[:, j] + if mask.sum() < 2: + s[i, j] = s[j, i] = 0.0 + continue + xi = arr[mask, i] + xj = arr[mask, j] + mi = xi.mean() + mj = xj.mean() + cov = float(((xi - mi) * (xj - mj)).sum() / (mask.sum() - 1)) + s[i, j] = s[j, i] = cov + return s + + +def _spearman_with_anchor( # noqa: C901, PLR0912 + s: np.ndarray, + *, + anchor: int, + anchor_loading: float, + target_anchor: int, + sd_floor: float, + var_floor: float, +) -> SpearmanResult | None: + """Spearman estimates with a specified anchor; ``None`` if degenerate.""" + n_meas = s.shape[0] + diag = np.maximum(np.diag(s), sd_floor**2) + sds = np.sqrt(diag) + cov_threshold = 1e-3 * sds[anchor] * sds + + # The anchor must covary meaningfully with at least one other column. + cross = np.array( + [ + (k, abs(s[anchor, k])) + for k in range(n_meas) + if k != anchor and abs(s[anchor, k]) > cov_threshold[k] + ] + ) + if cross.size == 0: + return None + + # Pool Var(F) via the median of triples S[a,j] S[a,k] / S[j,k] for + # j, k != a, j != k, with S[j,k] above noise. + triples = [] + for j in range(n_meas): + if j == anchor or abs(s[anchor, j]) <= cov_threshold[j]: + continue + for k in range(j + 1, n_meas): + if k == anchor or abs(s[anchor, k]) <= cov_threshold[k]: + continue + cross_threshold = 1e-3 * sds[j] * sds[k] + if abs(s[j, k]) <= cross_threshold: + continue + triples.append(s[anchor, j] * s[anchor, k] / s[j, k]) + + if not triples: + # Only one measurement covaries with the anchor — Var(F) is + # under-identified. Fall back to S[anchor, k] / S[k, k] times + # diagonal (rough), then clamp. + partner_idx = int(cross[np.argmax(cross[:, 1]), 0]) + latent_var_raw = abs(s[anchor, partner_idx]) + else: + latent_var_raw = float(np.median(triples)) + + latent_var = max(latent_var_raw, var_floor) + + raw_loadings = np.zeros(n_meas) + raw_loadings[anchor] = 1.0 + for k in range(n_meas): + if k == anchor: + continue + raw_loadings[k] = s[anchor, k] / latent_var + + # Rescale so the user-supplied target anchor reports ``anchor_loading``. + # If we rotated to a different anchor candidate, the recovered scale + # must be re-anchored on ``target_anchor``. + if target_anchor != anchor: + if abs(raw_loadings[target_anchor]) <= 1e-12: + return None + scale = anchor_loading / raw_loadings[target_anchor] + else: + scale = anchor_loading + loadings = raw_loadings * scale + # Var(F) absorbs the inverse square of the rescale. 
+ latent_var = latent_var / (scale**2) + latent_var = max(latent_var, var_floor) + + meas_var = np.maximum(diag - loadings**2 * latent_var, sd_floor**2) + meas_sds = np.sqrt(meas_var) + + return SpearmanResult( + loadings=loadings, + meas_sds=meas_sds, + latent_var=latent_var, + valid=True, + ) diff --git a/src/skillmodels/amn/posterior_states.py b/src/skillmodels/amn/posterior_states.py new file mode 100644 index 00000000..086b34e5 --- /dev/null +++ b/src/skillmodels/amn/posterior_states.py @@ -0,0 +1,191 @@ +"""Per-individual posterior latent-factor estimates from an AMN fit. + +AMN does not Kalman-filter or quadrature-integrate; it fits a mixture +of normals on the augmented measure vector. The natural per-individual +factor estimate is therefore the mixture-Schur conditional posterior +``E[theta | Y_i]`` evaluated under the fitted reduced-form parameters, +mirrored across the K components weighted by per-individual mixture +responsibilities. + +For every observation `i` and every mixture component `k`: + + mu_{theta|Y}(k, i) = mu_theta(k) + + Cov(theta, Y)(k) Cov(Y)(k)^{-1} (Y_i - mu_Y(k)) + +where ``mu_Y(k) = A + Lambda mu_theta(k)``, +``Cov(Y)(k) = Lambda Omega(k) Lambda^T + diag(sigma^2)``, and +``Cov(theta, Y)(k) = Omega(k) Lambda^T``. The mixture responsibility is the +standard Bayes posterior of `k` given `Y_i`, and +``E[theta | Y_i] = sum_k r(k|i) mu_{theta|Y}(k, i)``. + +The function returns a dict matching the CHS / AF +`get_filtered_states` shape (an ``"unanchored_states"`` entry only — +AMN does not produce anchored states without an explicit anchoring +post-step). +""" + +from typing import Any + +import numpy as np +import pandas as pd + +from skillmodels.amn.mixture_em import build_augmented_measure_matrix +from skillmodels.amn.types import AMNEstimationResult +from skillmodels.chs.process_debug_data import create_state_ranges +from skillmodels.common.process_model import process_model + + +def get_amn_posterior_states( # noqa: C901, PLR0912, PLR0915 + amn_result: AMNEstimationResult, + data: pd.DataFrame, +) -> dict[str, dict[str, Any]]: + """Compute the per-observation latent factor posteriors. + + Args: + amn_result: The fitted AMN result. + data: Same panel dataset used for the original fit. + + Return: + Nested dict with the CHS-compatible + ``{"unanchored_states": {"states": DataFrame, "state_ranges": ...}}`` + layout (no ``"anchored_states"`` key — AMN does not anchor). + + """ + processed_model = process_model(amn_result.model_spec) + layout = amn_result.stages.mixture.layout + augmented = build_augmented_measure_matrix(data, processed_model, layout) + n_aug = augmented.shape[1] + + mixture = amn_result.stages.mixture + structural = amn_result.stages.structural + + # Build Lambda and intercepts in the original AMN structural basis. + n_components = mixture.weights.shape[0] + factor_slots = structural.factor_period_slots + n_factor = len(factor_slots) + + # Reconstruct Lambda from the loadings DataFrame + observed-factor + # / control passthrough. 
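+    # Measurement slots take their estimated loading (falling back to 1.0
+    # if a (period, measurement, factor) row is missing); observed-factor
+    # and control slots pass through with loading 1 and zero error variance.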
+ lambda_mat = np.zeros((n_aug, n_factor)) + slot_to_id = {sp: i for i, sp in enumerate(factor_slots)} + + for aug_idx, (period, factor, meas) in zip( + layout.measurement_slots, layout.measurement_meta, strict=True + ): + col = slot_to_id.get((period, factor)) + if col is None: + continue + try: + loading = structural.loadings.loc[(period, meas, factor), "loading"] + except KeyError: + loading = 1.0 + lambda_mat[aug_idx, col] = float(loading) + + for aug_idx, (period, of_name) in zip( + layout.observed_factor_slots, layout.observed_factor_meta, strict=True + ): + col = slot_to_id.get((period, of_name)) + if col is not None: + lambda_mat[aug_idx, col] = 1.0 + + for aug_idx, ctrl in zip(layout.control_slots, layout.control_meta, strict=True): + col = slot_to_id.get((-1, ctrl)) + if col is not None: + lambda_mat[aug_idx, col] = 1.0 + + intercept = np.zeros(n_aug) + for aug_idx, (period, _factor, meas) in zip( + layout.measurement_slots, layout.measurement_meta, strict=True + ): + try: + intercept[aug_idx] = float( + structural.measurement_intercepts.loc[(period, meas), "intercept"] + ) + except KeyError: + intercept[aug_idx] = 0.0 + + sigma2 = np.zeros(n_aug) + for aug_idx, (period, _factor, meas) in zip( + layout.measurement_slots, layout.measurement_meta, strict=True + ): + try: + sd = float(structural.measurement_sds.loc[(period, meas), "sd"]) + except KeyError: + sd = 0.0 + sigma2[aug_idx] = sd * sd + + diag_sigma = np.diag(sigma2) + + # Drop rows with any NaN in the augmented vector (listwise; matches + # Stage 1's complete-case behaviour). Posterior is reported only + # for complete-case observations. + complete_mask = ~np.isnan(augmented).any(axis=1) + y_complete = augmented[complete_mask] + + # Precompute per-component pieces. + mu_theta = structural.factor_mixture_means + omegas = structural.factor_mixture_covariances + mu_y_per = np.empty((n_components, n_aug)) + cov_y_inv = np.empty((n_components, n_aug, n_aug)) + cov_theta_y = np.empty((n_components, n_factor, n_aug)) + log_det = np.empty(n_components) + for k in range(n_components): + mu_y_per[k] = intercept + lambda_mat @ mu_theta[k] + cov_y = lambda_mat @ omegas[k] @ lambda_mat.T + diag_sigma + cov_y = 0.5 * (cov_y + cov_y.T) + 1e-10 * np.eye(n_aug) + cov_y_inv[k] = np.linalg.inv(cov_y) + cov_theta_y[k] = omegas[k] @ lambda_mat.T + sign, logdet = np.linalg.slogdet(cov_y) + log_det[k] = logdet if sign > 0 else np.inf + + # Per-obs log-pdf in each component (up to a constant). + log_pi = np.log(np.clip(mixture.weights, 1e-300, None)) + diffs = y_complete[:, None, :] - mu_y_per[None, :, :] # (n_complete, K, n_aug) + quad = np.einsum("ikj,kjl,ikl->ik", diffs, cov_y_inv, diffs) + log_probs = log_pi[None, :] - 0.5 * (log_det[None, :] + quad) + log_probs -= log_probs.max(axis=1, keepdims=True) + probs = np.exp(log_probs) + responsibilities = probs / probs.sum(axis=1, keepdims=True) + + # Per-component conditional mean of theta given Y_i. + cond_means = np.empty((y_complete.shape[0], n_components, n_factor)) + for k in range(n_components): + cond_means[:, k, :] = ( + mu_theta[k] + (cov_theta_y[k] @ cov_y_inv[k] @ diffs[:, k, :].T).T + ) + + # Mixture-averaged posterior mean of theta. + posterior = np.einsum("ik,ikj->ij", responsibilities, cond_means) + + # Stuff into a (id, period) -> (factor, ...) DataFrame. 
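+    # One row per complete-case individual and period; factors that have no
+    # slot in a given period are reported as NaN for that period.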
+ case_level = str(data.index.names[0]) + caseids = data.index.get_level_values(case_level).unique() + complete_caseids = caseids[np.asarray(complete_mask, dtype=bool)] + + latent_factors = processed_model.labels.latent_factors + periods = processed_model.labels.periods + rows = [] + for row_idx, caseid in enumerate(complete_caseids): + for period in periods: + row: dict[str, Any] = {"id": caseid, "period": int(period)} + for factor in latent_factors: + col_idx = slot_to_id.get((int(period), factor)) + row[factor] = ( + float(posterior[row_idx, col_idx]) + if col_idx is not None + else np.nan + ) + rows.append(row) + states_df = pd.DataFrame(rows) + + state_ranges = create_state_ranges( + filtered_states=states_df, + factors=latent_factors, + ) + + return { + "unanchored_states": { + "states": states_df, + "state_ranges": state_ranges, + }, + } diff --git a/src/skillmodels/amn/simulate_and_regress.py b/src/skillmodels/amn/simulate_and_regress.py new file mode 100644 index 00000000..4ba8f029 --- /dev/null +++ b/src/skillmodels/amn/simulate_and_regress.py @@ -0,0 +1,450 @@ +"""Stage 3 of the AMN estimator: simulate latent factors and regress. + +Draws a synthetic latent-factor panel from the structural mixture +fitted in Stage 2 and recovers the per-period transition / investment +parameters by least-squares regression. + +Specialised fitters: closed-form OLS for `linear`; softmax-constrained +Levenberg-Marquardt for `log_ces` and `log_ces_with_constant` (keeps +gammas on the simplex). Everything else (translog, robust_translog, +linear_and_squares, log_ces_general, and any user +`@register_params`-decorated transition) goes through a generic NLS +path that calls the transition function directly via `jax.vmap`. This +mirrors the per-factor NLS in +`Monte Carlo Simulations/master_approx_simulationces2periodrho_5.R` +but generalises beyond the paper's CES-only case. +""" + +import inspect +from collections.abc import Callable + +import jax +import jax.numpy as jnp +import numpy as np +import pandas as pd +from scipy.optimize import least_squares + +from skillmodels.amn.types import ( + MinimumDistanceResult, + ProductionFitResult, +) +from skillmodels.common.model_spec import ModelSpec +from skillmodels.common.types import ProcessedModel + + +def _draw_factor_panel( + structural: MinimumDistanceResult, + mixture_weights: np.ndarray, + *, + n_draws: int, + seed: int, +) -> pd.DataFrame: + """Sample ``n_draws`` rows from the K-component Gaussian mixture. + + Returns a DataFrame with one column per ``(period, factor)`` slot. 
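+
+    Component draw counts are the largest-remainder rounding of
+    ``n_draws * mixture_weights``, so the realized component shares match
+    the weights as closely as possible for a finite panel.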
+ """ + rng = np.random.default_rng(seed) + means = structural.factor_mixture_means + covs = structural.factor_mixture_covariances + n_components, n_factor = means.shape + + counts = np.floor(n_draws * mixture_weights).astype(int) + deficit = n_draws - counts.sum() + if deficit > 0: + order = np.argsort(-(n_draws * mixture_weights - counts)) + for idx in order[:deficit]: + counts[idx] += 1 + + chunks = [] + for k in range(n_components): + if counts[k] == 0: + continue + cov = covs[k] + cov = 0.5 * (cov + cov.T) + 1e-10 * np.eye(n_factor) + samples = rng.multivariate_normal(means[k], cov, size=counts[k]) + chunks.append(samples) + panel = np.vstack(chunks) + rng.shuffle(panel) + + columns = [f"f[{t}|{f}]" for t, f in structural.factor_period_slots] + return pd.DataFrame(panel, columns=columns) + + +def _slot_column(period: int, factor: str) -> str: + return f"f[{period}|{factor}]" + + +def _fit_linear( + y: np.ndarray, + x_design: np.ndarray, + regressor_names: list[str], +) -> tuple[dict[str, float], float]: + """OLS regression with an intercept (added as the last column). + + Returns: + ``(params_by_name, residual_sd)`` with `constant` included as + the trailing parameter. + + """ + n = x_design.shape[0] + full_design = np.column_stack([x_design, np.ones(n)]) + coefs, *_ = np.linalg.lstsq(full_design, y, rcond=None) + resid = y - full_design @ coefs + sd = float(np.sqrt(np.mean(resid**2))) + out = dict(zip([*regressor_names, "constant"], coefs.tolist(), strict=True)) + return out, sd + + +def _fit_log_ces( + y: np.ndarray, + x_design: np.ndarray, + regressor_names: list[str], + *, + with_constant: bool, +) -> tuple[dict[str, float], float]: + """Fit log_ces (or log_ces_with_constant) via Levenberg-Marquardt. + + Parametrises ``y = delta + (1/rho) * log(sum_i gamma_i * exp(X_i * rho))`` + with gammas constrained to the simplex via softmax. When + ``with_constant=False``, the additive ``delta`` is held at 0. + """ + n_reg = len(regressor_names) + eps = 1e-12 + + def residuals(theta: np.ndarray) -> np.ndarray: + logits = np.concatenate([theta[: n_reg - 1], [0.0]]) + gammas = np.exp(logits - logits.max()) + gammas = gammas / gammas.sum() + rho = theta[n_reg - 1] + constant = theta[n_reg] if with_constant else 0.0 + exponents = x_design * rho + max_exp = np.max(exponents, axis=1, keepdims=True) + shifted = np.exp(exponents - max_exp) + log_inside = np.log(np.clip((gammas * shifted).sum(axis=1), eps, None)) + pred = constant + (max_exp[:, 0] + log_inside) / rho + return pred - y + + n_unknowns = n_reg + (1 if with_constant else 0) + theta0 = np.zeros(n_unknowns) + theta0[n_reg - 1] = 0.5 + result = least_squares(residuals, theta0, method="lm", max_nfev=2000) + theta = result.x + logits = np.concatenate([theta[: n_reg - 1], [0.0]]) + gammas = np.exp(logits - logits.max()) + gammas = gammas / gammas.sum() + rho = float(theta[n_reg - 1]) + constant = float(theta[n_reg]) if with_constant else 0.0 + resid = residuals(theta) + sd = float(np.sqrt(np.mean(resid**2))) + + out: dict[str, float] = dict(zip(regressor_names, gammas.tolist(), strict=True)) + out["phi"] = rho + if with_constant: + out["constant"] = constant + return out, sd + + +def _make_user_transition_callable( + user_func: Callable, + factor_names: tuple[str, ...], + param_names: tuple[str, ...], +) -> Callable[[jnp.ndarray, jnp.ndarray], jnp.ndarray]: + """Wrap a `@register_params`-decorated user function as `(states, params)`. 
+ + Mirrors `skillmodels.af.transition_period._wrap_registered_transition_function` + so Stage 3 can pass user transitions through `jax.vmap` for NLS. + """ + sig = inspect.signature(user_func) + arg_names = [name for name in sig.parameters if name != "params"] + arg_positions = tuple(factor_names.index(name) for name in arg_names) + + def wrapped(states: jnp.ndarray, params_vec: jnp.ndarray) -> jnp.ndarray: + kwargs: dict[str, jnp.ndarray | dict[str, jnp.ndarray]] = { + name: states[pos] + for name, pos in zip(arg_names, arg_positions, strict=True) + } + kwargs["params"] = dict(zip(param_names, params_vec, strict=True)) + return user_func(**kwargs) + + return wrapped + + +def _resolve_transition_callable( + transition_name: str, + factor: str, + processed_model: ProcessedModel, + model_spec: ModelSpec, +) -> tuple[Callable[[jnp.ndarray, jnp.ndarray], jnp.ndarray], tuple[str, ...]]: + """Return a ``(states, params) -> scalar`` callable plus param names. + + For built-in transitions this is the function imported from + `skillmodels.common.transition_functions`; for user functions it is + `_make_user_transition_callable(...)` applied to the raw callable on + the model spec. + """ + from skillmodels.common import transition_functions as tf # noqa: PLC0415 + + builtin_names = { + "linear", + "translog", + "robust_translog", + "linear_and_squares", + "log_ces", + "log_ces_with_constant", + "log_ces_general", + } + factor_names = ( + *processed_model.labels.latent_factors, + *processed_model.labels.observed_factors, + ) + transition_info = processed_model.transition_info + if transition_info is None: + msg = "ProcessedModel has no transition_info; cannot run Stage 3." + raise ValueError(msg) + param_names = tuple(transition_info.param_names[factor]) + + if transition_name in builtin_names: + func = getattr(tf, transition_name) + return func, param_names + + factor_spec = model_spec.factors.get(factor) + if factor_spec is None: + msg = ( + f"Cannot resolve transition callable for factor '{factor}' " + f"(transition='{transition_name}'). Factor not found on " + "model_spec.factors." + ) + raise KeyError(msg) + raw = factor_spec.transition_function + if not callable(raw): + msg = ( + f"Factor '{factor}' has transition_function={raw!r} which is " + "neither a built-in name nor a callable." + ) + raise TypeError(msg) + wrapped = _make_user_transition_callable(raw, factor_names, param_names) + return wrapped, param_names + + +def _fit_generic_nls( + transition_func: Callable[[jnp.ndarray, jnp.ndarray], jnp.ndarray], + param_names: tuple[str, ...], + y: np.ndarray, + states_panel: np.ndarray, + *, + init_overrides: dict[str, float] | None = None, +) -> tuple[dict[str, float], float]: + """Generic Levenberg-Marquardt NLS via `jax.vmap` over the panel. + + Works for any `(states, params) -> scalar` callable, including + translog, robust_translog, linear_and_squares, log_ces_general, and + user-registered transitions. + + Args: + transition_func: callable taking a 1D state vector and a 1D + param vector and returning a scalar. + param_names: names of the parameters in the order accepted by + `transition_func`. + y: target vector, shape ``(n_obs,)``. + states_panel: state matrix, shape ``(n_obs, n_state_features)``. + init_overrides: optional ``{name: value}`` to seed specific + parameters before NLS. Useful for setting `phi != 0` on + log_ces-family functions. 
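+
+    Return:
+        ``(params_by_name, residual_sd)``, matching the output of the
+        specialised ``_fit_linear`` and ``_fit_log_ces`` fitters.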
+ + """ + init_overrides = init_overrides or {} + + @jax.jit + def predict_batch(theta: jnp.ndarray, states: jnp.ndarray) -> jnp.ndarray: + return jax.vmap(transition_func, in_axes=(0, None))(states, theta) + + states_jnp = jnp.asarray(states_panel) + + def residuals(theta_np: np.ndarray) -> np.ndarray: + preds = predict_batch(jnp.asarray(theta_np), states_jnp) + return np.asarray(preds) - y + + theta0 = np.zeros(len(param_names)) + for name, val in init_overrides.items(): + if name in param_names: + theta0[param_names.index(name)] = val + # phi-style elasticity defaults: any "phi", "rho", "sigma" param + # that doesn't otherwise have an override gets seeded at 0.5 so the + # CES / general-CES log expressions don't divide by zero. + for j, name in enumerate(param_names): + if name in {"phi", "rho", "sigma"} and name not in init_overrides: + theta0[j] = 0.5 + # Simplex-style "gammas" (anything listed as a factor name in the + # param list) get a uniform initial share if the function looks + # CES-shaped (has a "phi"-like param). + has_elasticity = any(n in {"phi", "rho", "sigma"} for n in param_names) + if has_elasticity: + share_candidates = [ + j + for j, n in enumerate(param_names) + if n not in {"phi", "rho", "sigma", "constant"} + ] + if share_candidates: + theta0[share_candidates] = 1.0 / len(share_candidates) + + result = least_squares(residuals, theta0, method="lm", max_nfev=5000) + theta = result.x + resid = residuals(theta) + sd = float(np.sqrt(np.mean(resid**2))) + out = dict(zip(param_names, [float(v) for v in theta], strict=True)) + return out, sd + + +def _fit_transition( + transition_name: str, + factor: str, + processed_model: ProcessedModel, + model_spec: ModelSpec, + y: np.ndarray, + x_design: np.ndarray, + regressor_names: list[str], +) -> tuple[dict[str, float], float]: + """Dispatch to the right per-transition fitter. + + `linear` and `log_ces`-family functions get specialised fitters for + speed / simplex constraints; everything else (translog, + robust_translog, linear_and_squares, log_ces_general, user) falls + through to a generic `jax.vmap`-based NLS. + """ + if transition_name == "linear": + return _fit_linear(y, x_design, regressor_names) + if transition_name == "log_ces": + return _fit_log_ces(y, x_design, regressor_names, with_constant=False) + if transition_name == "log_ces_with_constant": + return _fit_log_ces(y, x_design, regressor_names, with_constant=True) + + func, param_names = _resolve_transition_callable( + transition_name, factor, processed_model, model_spec + ) + return _fit_generic_nls(func, param_names, y, x_design) + + +def _factors_at_period(processed_model: ProcessedModel) -> tuple[str, ...]: + """Latent + observed factor names (used as transition regressors).""" + return ( + *processed_model.labels.latent_factors, + *processed_model.labels.observed_factors, + ) + + +def simulate_and_regress( # noqa: C901 + structural: MinimumDistanceResult, + processed_model: ProcessedModel, + model_spec: ModelSpec, + mixture_weights: np.ndarray, + *, + n_draws: int = 100_000, + seed: int = 0, + investment_endogeneity: bool = True, +) -> ProductionFitResult: + """Simulate the joint latent-factor distribution and run Stage-3 regressions. + + Args: + structural: Stage 2 output (structural mixture, loadings, etc.). + processed_model: Skillmodels processed model. + model_spec: Original model spec; used to look up raw transition + callables for user-registered `@register_params` functions. + mixture_weights: Per-component mixture weights from Stage 1. 
+ n_draws: Synthetic-panel size. + seed: RNG seed. + investment_endogeneity: Reserved for future control-function + extension; currently the investment equation is fit with + plain OLS regardless. + + Return: + ProductionFitResult with production-function and investment-equation + parameter DataFrames. + + """ + del investment_endogeneity # placeholder; control function is v2 + + panel = _draw_factor_panel(structural, mixture_weights, n_draws=n_draws, seed=seed) + + periods = processed_model.labels.periods + endog_info = processed_model.endogenous_factors_info + transition_info = processed_model.transition_info + factor_to_function_name = ( + dict(transition_info.function_names) if transition_info is not None else {} + ) + + transition_rows: list[tuple[str, int, str, str, float]] = [] + investment_rows: list[tuple[str, int, str, str, float]] = [] + + for t_idx in range(len(periods) - 1): + t = int(periods[t_idx]) + t_next = int(periods[t_idx + 1]) + factor_names = _factors_at_period(processed_model) + regressor_cols = [_slot_column(t, f) for f in factor_names] + present_pairs = [ + (f, c) + for f, c in zip(factor_names, regressor_cols, strict=True) + if c in panel.columns + ] + if not present_pairs: + continue + present_factor_names = [f for f, _ in present_pairs] + x_design = panel[[c for _, c in present_pairs]].to_numpy() + + for factor in processed_model.labels.latent_factors: + is_endog = ( + factor in endog_info.factor_info + and endog_info.factor_info[factor].is_endogenous + ) + target_col = _slot_column(t_next, factor) + if target_col not in panel.columns: + continue + y = panel[target_col].to_numpy() + trans_name = factor_to_function_name.get(factor, "linear") + if trans_name == "constant": + continue + if is_endog: + params, sd = _fit_linear(y, x_design, present_factor_names) + for regname, value in params.items(): + investment_rows.append( + ("investment_eq", t, factor, regname, float(value)) + ) + investment_rows.append(("investment_sds", t, factor, "-", sd)) + else: + params, sd = _fit_transition( + trans_name, + factor, + processed_model, + model_spec, + y, + x_design, + present_factor_names, + ) + for regname, value in params.items(): + transition_rows.append( + ("transition", t, factor, regname, float(value)) + ) + transition_rows.append(("shock_sds", t, factor, "-", sd)) + + def _rows_to_df( + rows: list[tuple[str, int, str, str, float]], + ) -> pd.DataFrame: + if not rows: + return pd.DataFrame( + {"value": []}, + index=pd.MultiIndex.from_tuples( + [], names=["category", "aug_period", "name1", "name2"] + ), + ) + index = pd.MultiIndex.from_tuples( + [(c, p, n1, n2) for c, p, n1, n2, _ in rows], + names=["category", "aug_period", "name1", "name2"], + ) + values = [v for *_, v in rows] + return pd.DataFrame({"value": values}, index=index) + + return ProductionFitResult( + production_params=_rows_to_df(transition_rows), + investment_params=_rows_to_df(investment_rows), + n_draws=n_draws, + seed=seed, + ) diff --git a/src/skillmodels/amn/start_values.py b/src/skillmodels/amn/start_values.py new file mode 100644 index 00000000..67f7f1b7 --- /dev/null +++ b/src/skillmodels/amn/start_values.py @@ -0,0 +1,658 @@ +"""Moment-based starting values for the CHS estimator. + +Replaces the legacy `0.5` / `1.0` / `0.0` constant fills with +data-derived seeds. Two-stage hybrid: + +1. **Spearman cross-covariance moments** identify the measurement + system (loadings + measurement-error SDs + latent factor SDs) + per period. +2. 
**OLS on Bartlett-scored factor proxies** identifies transition
+   coefficients and the residual SD of the production shock —
+   the AMN (Attanasio-Meghir-Nix 2020) flavour that the AF paper §7
+   recommends for starting values, just bootstrapped from the
+   Spearman estimates rather than from a separate AMN run.
+
+Together these give a data-derived seed for every category that has
+moment-based identification. Categories that Spearman + Bartlett-OLS
+cannot identify (mixture weights, initial means, controls) fall
+back to neutral defaults — these affect convergence speed only,
+not identification.
+"""
+
+from collections.abc import Iterable, Mapping
+
+import numpy as np
+import optimagic as om
+import pandas as pd
+
+from skillmodels.amn.moments import (
+    SpearmanResult,
+    seed_beta_from_ols,
+    spearman_factor_moments,
+)
+from skillmodels.common.model_spec import ModelSpec
+from skillmodels.common.process_data import process_data
+from skillmodels.common.process_model import process_model
+from skillmodels.common.types import Normalizations, ProcessedModel
+
+
+def get_spearman_start_params(
+    model_spec: ModelSpec,
+    data: pd.DataFrame,
+    params_template: pd.DataFrame,
+) -> pd.DataFrame:
+    """Return a copy of `params_template` with moment-based seed values.
+
+    Walks the params index and fills each row using:
+
+    * `loadings`, `meas_sds`: per-period Spearman moments on the
+      single-factor measurements of each latent factor.
+    * `initial_cholcovs`: diagonal entries set to `sqrt(latent_var)`
+      from the period-0 Spearman result; off-diagonals 0.
+    * `initial_states`: 0 (location is unidentified from cross-covs).
+    * `mixture_weights`: uniform `1 / n_mixtures`.
+    * `controls`: 0.
+    * `transition`, `shock_sds`: 0.5 as a neutral default, then
+      overridden with OLS estimates on Bartlett-scored factor proxies
+      wherever those moments identify them.
+
+    Rows whose `value` is already pinned (non-NaN: user normalizations,
+    fixed_params pins, and model-implied fixes written by
+    `enforce_fixed_constraints`) are left untouched.
+
+    Args:
+        model_spec: Model specification.
+        data: Long-format panel with the same `(id, period)` MultiIndex
+            consumed by `get_maximization_inputs`.
+        params_template: The params DataFrame returned by
+            `get_maximization_inputs(...)["params_template"]` — it
+            already has the right MultiIndex, bounds, and pinned
+            values.
+
+    Return:
+        Copy of `params_template` with the `value` column populated.
+
+    """
+    processed_model = process_model(model_spec)
+    processed_data = process_data(
+        df=data,
+        has_endogenous_factors=processed_model.endogenous_factors_info.has_endogenous_factors,
+        labels=processed_model.labels,
+        update_info=processed_model.update_info,
+        anchoring_info=processed_model.anchoring,
+        purpose="estimation",
+    )
+    measurements = np.asarray(processed_data["measurements"])
+    update_info = processed_model.update_info
+    latent_factors = processed_model.labels.latent_factors
+    n_mixtures = processed_model.dimensions.n_mixtures
+    loading_norms = _collect_loading_norms(processed_model.normalizations)
+    aug_periods = processed_model.labels.aug_periods
+
+    out = params_template.copy()
+    # `free` here means "this entry still needs a value" — i.e. it has
+    # not been pinned by `enforce_fixed_constraints` or by the caller.
+    # We use NaN-detection instead of `lower_bound != upper_bound` because
+    # `enforce_fixed_constraints` only writes `value` and leaves bounds
+    # untouched; bound-equality alone would misclassify fixed entries.
+ free = out["value"].isna() + + _apply_neutral_defaults(out, free, n_mixtures=n_mixtures) + + update_info_periods = set(update_info.index.get_level_values("aug_period")) + spearman_per_period: dict[tuple[int, str], SpearmanResult] = {} + for aug_period in aug_periods: + if aug_period not in update_info_periods: + continue + period_meas_index = _measurement_row_index(update_info, aug_period) + for factor in latent_factors: + factor_meas = _single_factor_measurements( + update_info, + aug_period=aug_period, + factor=factor, + all_factors=latent_factors, + ) + if len(factor_meas) < 2: + continue + cols = [period_meas_index[m] for m in factor_meas] + sub = measurements[cols, :].T # (n_obs, n_meas) + anchor_local, anchor_loading = _pick_anchor( + factor_meas=factor_meas, factor=factor, loading_norms=loading_norms + ) + result = spearman_factor_moments( + sub, anchor_idx=anchor_local, anchor_loading=anchor_loading + ) + if not result.valid: + continue + spearman_per_period[(aug_period, factor)] = result + _override_loadings_meas_sds( + out, + free, + aug_period=aug_period, + factor=factor, + factor_meas=factor_meas, + result=result, + ) + + _override_initial_cholcovs( + out, + free, + spearman_per_period=spearman_per_period, + latent_factors=latent_factors, + n_mixtures=n_mixtures, + ) + + _override_transition_via_ols( + out, + free, + processed_model=processed_model, + measurements=measurements, + spearman_per_period=spearman_per_period, + observed_factors=np.asarray(processed_data["observed_factors"]), + ) + + _pool_within_stage_equality( + out, + free=free, + processed_model=processed_model, + ) + + return out + + +def pool_equality_groups( # noqa: C901 + params: pd.DataFrame, + constraints: list[om.constraints.Constraint], + *, + keep_pinned_values: pd.Series | None = None, +) -> pd.DataFrame: + """Pool param values within each `om.EqualityConstraint` group. + + For each `om.EqualityConstraint` whose selector is the standard + `select_by_loc(loc=multi_index)` form, replace the values of all + members of the group with a single shared value so the equality + constraint holds at the start values. If a member is flagged as + "pinned" (via `keep_pinned_values=True` for that loc), the pinned + value is used for the whole group; otherwise the group is averaged. + + Use after moment-based starting values: Spearman seeds each period + independently, which violates user equality constraints across + periods (e.g., loadings or meas_sds constant across periods). + Calling this with the user constraint list restores the equalities + while keeping the data-derived information (now pooled). + + Args: + params: Params DataFrame with a `"value"` column and the + standard 4-level MultiIndex. + constraints: List of optimagic Constraint objects. Only + `om.EqualityConstraint` entries with a `select_by_loc` + partial as `selector` are honoured. + keep_pinned_values: Optional boolean Series indexed like + `params`. Entries where this is True keep their value; + the pooling logic copies that value to every other member + of the same equality group. + + Return: + Modified copy of `params`. 
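+
+    A typical call (sketch; `constraints` stands for whatever constraint
+    list is passed on to optimagic):
+
+        params = get_spearman_start_params(model_spec, data, params_template)
+        params = pool_equality_groups(params, constraints)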
+ """ + out = params.copy() + for c in constraints: + if not isinstance(c, om.EqualityConstraint): + continue + selector = c.selector + keywords = getattr(selector, "keywords", None) + if not keywords or "loc" not in keywords: + continue + loc = keywords["loc"] + if not isinstance(loc, pd.MultiIndex) or len(loc) <= 1: + continue + members = [m for m in loc if m in out.index] + if len(members) <= 1: + continue + if keep_pinned_values is not None: + pinned = [ + float(out.loc[m, "value"]) + for m in members + if bool(keep_pinned_values.loc[m]) and pd.notna(out.loc[m, "value"]) + ] + else: + pinned = [] + if pinned: + target = pinned[0] + else: + raw = [ + float(out.loc[m, "value"]) + for m in members + if pd.notna(out.loc[m, "value"]) + ] + if not raw: + continue + target = float(np.mean(raw)) + for m in members: + if keep_pinned_values is None or not bool(keep_pinned_values.loc[m]): + out.loc[m, "value"] = target + return out + + +def _apply_neutral_defaults( + params: pd.DataFrame, + free: pd.Series, + *, + n_mixtures: int, +) -> None: + cat = params.index.get_level_values("category") + params.loc[free & (cat == "controls"), "value"] = 0.0 + params.loc[free & (cat == "loadings"), "value"] = 1.0 + params.loc[free & (cat == "meas_sds"), "value"] = 0.5 + params.loc[free & (cat == "shock_sds"), "value"] = 0.5 + params.loc[free & (cat == "initial_states"), "value"] = 0.0 + params.loc[free & (cat == "mixture_weights"), "value"] = 1.0 / max(n_mixtures, 1) + params.loc[free & (cat == "initial_cholcovs"), "value"] = 0.0 + params.loc[free & (cat == "transition"), "value"] = 0.5 + diag_values = pd.Series( + [_is_cholcov_diag(idx) for idx in params.index], + index=params.index, + ) + diag_mask = free & (cat == "initial_cholcovs") & diag_values + params.loc[diag_mask, "value"] = 1.0 + + +def _is_cholcov_diag(idx: tuple) -> bool: + if idx[0] != "initial_cholcovs": + return False + name2 = idx[3] + if "-" not in name2: + return False + a, b = name2.split("-", 1) + return a == b + + +def _measurement_row_index( + update_info: pd.DataFrame, aug_period: int +) -> dict[str, int]: + out: dict[str, int] = {} + for flat_idx, (a_period, meas) in enumerate(update_info.index): + if a_period == aug_period: + out[meas] = flat_idx + return out + + +def _single_factor_measurements( + update_info: pd.DataFrame, + *, + aug_period: int, + factor: str, + all_factors: Iterable[str], +) -> tuple[str, ...]: + """Return measurements at `aug_period` that load only on `factor`.""" + period_rows = update_info.xs(aug_period, level="aug_period") + measurement_rows = period_rows.loc[period_rows["purpose"] == "measurement"] + out: list[str] = [] + factors = list(all_factors) + for meas, row in measurement_rows.iterrows(): + if not bool(row[factor]): + continue + if any(bool(row[f]) for f in factors if f != factor): + continue + out.append(str(meas)) + return tuple(out) + + +def _collect_loading_norms( + normalizations: Mapping[str, Normalizations], +) -> dict[tuple[str, str], float]: + """Flatten per-factor loading normalizations into a (meas, factor) → value dict.""" + out: dict[tuple[str, str], float] = {} + for factor, norms in normalizations.items(): + loadings_per_period = norms.loadings + for period_norms in loadings_per_period: + for meas, value in period_norms.items(): + out[(meas, factor)] = float(value) + return out + + +def _pick_anchor( + *, + factor_meas: tuple[str, ...], + factor: str, + loading_norms: dict[tuple[str, str], float], +) -> tuple[int, float]: + for local_idx, meas in enumerate(factor_meas): + if (meas, 
factor) in loading_norms: + return local_idx, loading_norms[(meas, factor)] + return 0, 1.0 + + +def _override_loadings_meas_sds( + params: pd.DataFrame, + free: pd.Series, + *, + aug_period: int, + factor: str, + factor_meas: tuple[str, ...], + result: SpearmanResult, +) -> None: + for local_idx, meas in enumerate(factor_meas): + loc_load = ("loadings", aug_period, meas, factor) + if loc_load in params.index and free.loc[loc_load]: + params.loc[loc_load, "value"] = float(result.loadings[local_idx]) + loc_sd = ("meas_sds", aug_period, meas, "-") + if loc_sd in params.index and free.loc[loc_sd]: + params.loc[loc_sd, "value"] = float(result.meas_sds[local_idx]) + + +def _override_initial_cholcovs( + params: pd.DataFrame, + free: pd.Series, + *, + spearman_per_period: dict[tuple[int, str], SpearmanResult], + latent_factors: tuple[str, ...], + n_mixtures: int, +) -> None: + for factor in latent_factors: + result = spearman_per_period.get((0, factor)) + if result is None: + continue + sd_factor = float(np.sqrt(max(result.latent_var, 1e-12))) + for comp in range(n_mixtures): + loc = ( + "initial_cholcovs", + 0, + f"mixture_{comp}", + f"{factor}-{factor}", + ) + if loc in params.index and free.loc[loc]: + params.loc[loc, "value"] = sd_factor + + +def _pool_within_stage_equality( # noqa: C901, PLR0912 + params: pd.DataFrame, + *, + free: pd.Series, + processed_model: ProcessedModel, +) -> None: + """Pool `transition` and `shock_sds` seeds within each stage. + + The `_get_stage_constraints` machinery imposes pairwise equality + constraints across aug_periods belonging to the same stage. Our + OLS-based seeds produce period-specific values; this post-processing + pools them into a single stage value so the constraints hold at + the start values. Pinned entries (set by `enforce_fixed_constraints` + before the moment-based fill) take precedence — if any member of + the equality group is pinned, the whole group uses that pinned + value; otherwise the group is averaged. + """ + stagemap = processed_model.labels.aug_stagemap + stages: dict[int, list[int]] = {} + for aug_period, stage in enumerate(stagemap): + stages.setdefault(stage, []).append(aug_period) + + for stage_periods in stages.values(): + if len(stage_periods) <= 1: + continue + for category in ("transition", "shock_sds"): + try: + cat_slice = params.loc[category] + except KeyError: + continue + existing_periods = set(cat_slice.index.get_level_values(0)) + shared = [p for p in stage_periods if p in existing_periods] + if len(shared) <= 1: + continue + sub_index = cat_slice.loc[shared[0]].index + for inner_loc in sub_index: + full_locs = [ + (category, p, *inner_loc) + for p in shared + if (category, p, *inner_loc) in params.index + ] + if len(full_locs) <= 1: + continue + pinned_values = [ + float(params.loc[loc, "value"]) + for loc in full_locs + if not bool(free.loc[loc]) and pd.notna(params.loc[loc, "value"]) + ] + if pinned_values: + target = pinned_values[0] + else: + raw_values = [ + float(params.loc[loc, "value"]) + for loc in full_locs + if pd.notna(params.loc[loc, "value"]) + ] + if not raw_values: + continue + target = float(np.mean(raw_values)) + for loc in full_locs: + if free.loc[loc]: + params.loc[loc, "value"] = target + + +def _bartlett_score( + measurements: np.ndarray, + cols: list[int], + loadings: np.ndarray, + meas_sds: np.ndarray, +) -> np.ndarray: + r"""Bartlett factor-score estimator from per-indicator measurements. 
+ + Returns the inverse-noise-weighted single-factor proxy + :math:`\hat F = \sum_k w_k Z_k / \sum_k w_k \lambda_k` + with :math:`w_k = \lambda_k / \sigma_k^2`, over rows where all + `cols` are finite. Rows with any NaN get NaN proxy. + """ + sub = measurements[cols, :].T # (n_obs, n_meas) + weights = loadings / np.maximum(meas_sds**2, 1e-12) + denom = float(np.sum(weights * loadings)) + if denom < 1e-9: + return np.full(sub.shape[0], np.nan) + score = (sub * weights).sum(axis=1) / denom + mask = np.all(np.isfinite(sub), axis=1) + score[~mask] = np.nan + return score + + +def _override_transition_via_ols( # noqa: C901, PLR0912, PLR0915 + params: pd.DataFrame, + free: pd.Series, + *, + processed_model: ProcessedModel, + measurements: np.ndarray, + spearman_per_period: dict[tuple[int, str], SpearmanResult], + observed_factors: np.ndarray, +) -> None: + """Seed transition coefficients + shock_sds via OLS on Bartlett scores. + + For each transition equation that maps state factors at one + aug-period to a factor at the next aug-period with measurements, + run OLS of the target Bartlett score on regressors derived from + the source aug-period's Bartlett scores + observed factors. + Coefficients are written into the matching `transition` rows; + the residual SD is written to the matching `shock_sds` row. + + Currently implemented for `linear` and `translog` transition + functions. Other transition functions keep the constant-default + seeds set in `_apply_neutral_defaults`. + """ + update_info = processed_model.update_info + update_info_periods = list(update_info.index.get_level_values("aug_period")) + aug_periods = processed_model.labels.aug_periods + latent_factors = processed_model.labels.latent_factors + observed_factor_names = processed_model.labels.observed_factors + transition_info = processed_model.transition_info + + bartlett_proxies: dict[tuple[int, str], np.ndarray] = {} + for (aug_period, factor), result in spearman_per_period.items(): + period_meas_index = _measurement_row_index(update_info, aug_period) + factor_meas = _single_factor_measurements( + update_info, + aug_period=aug_period, + factor=factor, + all_factors=latent_factors, + ) + cols = [period_meas_index[m] for m in factor_meas] + proxy = _bartlett_score( + measurements, + cols, + result.loadings, + result.meas_sds, + ) + bartlett_proxies[(aug_period, factor)] = proxy + + n_obs = measurements.shape[1] if measurements.ndim == 2 else 0 + n_calendar_periods = processed_model.dimensions.n_periods + + for src_idx, src_aug in enumerate(aug_periods[:-1]): + tgt_aug = aug_periods[src_idx + 1] + if tgt_aug not in update_info_periods: + continue + cal_idx_src = _aug_to_calendar_idx( + processed_model, + src_aug, + n_calendar_periods, + ) + if cal_idx_src is None: + continue + if observed_factors.ndim == 3: + obs_at_src = observed_factors[cal_idx_src] + else: + obs_at_src = np.zeros((n_obs, 0)) + + for factor in latent_factors: + func_name = transition_info.function_names.get(factor) + if func_name not in ("linear", "translog"): + continue + if (tgt_aug, factor) not in bartlett_proxies: + continue + target = bartlett_proxies[(tgt_aug, factor)] + + source_factor_proxies: dict[str, np.ndarray] = {} + for src_factor in latent_factors: + if (src_aug, src_factor) in bartlett_proxies: + source_factor_proxies[src_factor] = bartlett_proxies[ + (src_aug, src_factor) + ] + if factor not in source_factor_proxies: + # Need at least the dependent factor's source proxy + # for the regression to be meaningful. 
+ continue + + param_names = transition_info.param_names[factor] + design, regressor_to_col = _build_design_for_transition( + func_name=func_name, + param_names=param_names, + latent_factors=latent_factors, + source_factor_proxies=source_factor_proxies, + observed_factor_names=observed_factor_names, + observed_factor_data=obs_at_src, + ) + if design is None: + continue + mask = np.isfinite(target) & np.all(np.isfinite(design), axis=1) + if mask.sum() <= design.shape[1] + 1: + continue + beta = seed_beta_from_ols(target[mask], design[mask]) + if not np.all(np.isfinite(beta)): + continue + for regressor, col_idx in regressor_to_col.items(): + loc = ("transition", src_aug, factor, regressor) + if loc in params.index and free.loc[loc]: + params.loc[loc, "value"] = float(beta[col_idx]) + + # Residual SD → shock_sds[src_aug][factor]. + residual = target[mask] - design[mask] @ beta + tgt_result = spearman_per_period.get((tgt_aug, factor)) + if tgt_result is None: + continue + # Bartlett-score residual variance includes + # shock_var + (Bartlett-score-noise) ≈ shock_var + 1/Σ w·λ. + score_noise_var = 1.0 / max( + np.sum( + tgt_result.loadings**2 / np.maximum(tgt_result.meas_sds**2, 1e-12), + ), + 1e-9, + ) + raw_var = float(np.var(residual, ddof=1)) + shock_var = max(raw_var - score_noise_var, 1e-6) + shock_sd = float(np.sqrt(shock_var)) + loc_sd = ("shock_sds", src_aug, factor, "-") + if loc_sd in params.index and free.loc[loc_sd]: + params.loc[loc_sd, "value"] = shock_sd + + +def _aug_to_calendar_idx( + processed_model: ProcessedModel, + aug_period: int, + n_calendar_periods: int, +) -> int | None: + """Map an aug-period to the calendar period of `observed_factors`. + + `processed_data["observed_factors"]` has shape + `(n_periods, n_obs, n_observed_factors)`; this returns the + calendar period index for the given aug-period, or `None` if it + falls outside the calendar range. + """ + mapping = processed_model.labels.aug_periods_to_periods + cal = mapping.get(aug_period) + if cal is None: + return None + if 0 <= int(cal) < n_calendar_periods: + return int(cal) + return None + + +def _build_design_for_transition( # noqa: C901 + *, + func_name: str, # noqa: ARG001 + param_names: tuple[str, ...], + latent_factors: tuple[str, ...], # noqa: ARG001 + source_factor_proxies: dict[str, np.ndarray], + observed_factor_names: tuple[str, ...], + observed_factor_data: np.ndarray, +) -> tuple[np.ndarray | None, dict[str, int]]: + """Build the OLS design matrix matching `param_names`. + + Returns `(design, regressor_to_col)` where `regressor_to_col` maps + each handled regressor name to its column index in `design`. + Regressors that cannot be built from the available proxies are + omitted (the corresponding transition coefficient stays at the + constant-default seed). 
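+
+    For example (factor names illustrative), translog `param_names` such as
+    `("fac1", "fac2", "fac1 * fac2", "fac1 ** 2", "fac2 ** 2", "constant")`
+    produce one design column per name that can be formed from the available
+    proxies; `regressor_to_col` records each handled name's column position.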
+ """ + n_obs = next(iter(source_factor_proxies.values())).shape[0] + columns: list[np.ndarray] = [] + regressor_to_col: dict[str, int] = {} + + def _proxy_for(name: str) -> np.ndarray | None: + if name in source_factor_proxies: + return source_factor_proxies[name] + if name in observed_factor_names: + idx = observed_factor_names.index(name) + if observed_factor_data.shape[1] > idx: + return observed_factor_data[:, idx] + return None + + for regressor in param_names: + if regressor == "constant": + columns.append(np.ones(n_obs)) + regressor_to_col[regressor] = len(columns) - 1 + elif " ** 2" in regressor: + name = regressor.replace(" ** 2", "").strip() + proxy = _proxy_for(name) + if proxy is not None: + columns.append(proxy * proxy) + regressor_to_col[regressor] = len(columns) - 1 + elif " * " in regressor: + a, b = (s.strip() for s in regressor.split(" * ")) + pa, pb = _proxy_for(a), _proxy_for(b) + if pa is not None and pb is not None: + columns.append(pa * pb) + regressor_to_col[regressor] = len(columns) - 1 + else: + proxy = _proxy_for(regressor) + if proxy is not None: + columns.append(proxy) + regressor_to_col[regressor] = len(columns) - 1 + + if not columns: + return None, {} + design = np.column_stack(columns) + return design, regressor_to_col diff --git a/src/skillmodels/amn/types.py b/src/skillmodels/amn/types.py new file mode 100644 index 00000000..4d9da73f --- /dev/null +++ b/src/skillmodels/amn/types.py @@ -0,0 +1,293 @@ +"""Frozen dataclass definitions for the AMN estimator. + +Mirrors the structure of `skillmodels.af.types` for consistency. The +three-stage Attanasio-Meghir-Nix (2020) procedure produces a stack of +intermediate results (reduced-form mixture, structural recovery, +production-function regression); each stage's output is held in +`AMNStageResults`. +""" + +from collections.abc import Mapping +from dataclasses import dataclass +from types import MappingProxyType +from typing import TYPE_CHECKING, Any, Literal + +import numpy as np +import pandas as pd + +from skillmodels.common.types import ensure_containers_are_immutable + +if TYPE_CHECKING: + from skillmodels.common.model_spec import ModelSpec + + +@dataclass(frozen=True, init=False) +class AMNEstimationOptions: + """Configuration options for the AMN estimator.""" + + n_mixture_components: int + """Components in the Gaussian-mixture approximation to F_{theta,X}.""" + + em_max_iter: int + """Maximum EM iterations in Stage 1.""" + + em_tol: float + """Log-likelihood tolerance for EM convergence.""" + + em_n_init: int + """Number of EM restarts; keep the highest-likelihood fit.""" + + em_reg_covar: float + """Diagonal ridge added to each EM covariance for numerical stability.""" + + n_simulation_draws: int + """Synthetic latent-factor panel size for Stage 3.""" + + minimum_distance_weighting: Literal["identity", "optimal"] + """Stage 2 weighting matrix. `"optimal"` uses a 2-step Avar estimate; + `"identity"` is faster and the paper's default.""" + + investment_endogeneity: bool + """If True, Stage 3 includes the control-function residual in the + production-function regression (AMN eq. 8). Ignored when the model has + no endogenous (investment) factors.""" + + optimizer_algorithm: str + """optimagic algorithm name for Stage 2 minimum-distance optimization.""" + + optimizer_options: MappingProxyType[str, Any] + """Additional kwargs forwarded to optimagic in Stage 2.""" + + keep_synthetic_panel: bool + """Retain the Stage-3 simulated panel on the result for diagnostics. 
Off + by default to keep result objects compact.""" + + seed: int + """RNG seed used for Stage 3 simulation and bootstrap inference.""" + + def __init__( # noqa: D107 + self, + n_mixture_components: int = 2, + em_max_iter: int = 500, + em_tol: float = 1e-6, + em_n_init: int = 5, + em_reg_covar: float = 1e-6, + n_simulation_draws: int = 100_000, + minimum_distance_weighting: Literal["identity", "optimal"] = "identity", + optimizer_algorithm: str = "scipy_lbfgsb", + optimizer_options: Mapping[str, Any] | None = None, + *, + investment_endogeneity: bool = True, + keep_synthetic_panel: bool = False, + seed: int = 0, + ) -> None: + object.__setattr__(self, "n_mixture_components", n_mixture_components) + object.__setattr__(self, "em_max_iter", em_max_iter) + object.__setattr__(self, "em_tol", em_tol) + object.__setattr__(self, "em_n_init", em_n_init) + object.__setattr__(self, "em_reg_covar", em_reg_covar) + object.__setattr__(self, "n_simulation_draws", n_simulation_draws) + object.__setattr__( + self, "minimum_distance_weighting", minimum_distance_weighting + ) + object.__setattr__(self, "investment_endogeneity", investment_endogeneity) + object.__setattr__(self, "optimizer_algorithm", optimizer_algorithm) + object.__setattr__( + self, + "optimizer_options", + ensure_containers_are_immutable(optimizer_options or {}), + ) + object.__setattr__(self, "keep_synthetic_panel", keep_synthetic_panel) + object.__setattr__(self, "seed", seed) + + +@dataclass(frozen=True) +class AugmentedMeasureLayout: + """Index bookkeeping for the augmented measure vector. + + AMN Stage 1 fits a Gaussian mixture on the joint vector of: + 1. Factor measurements at each period (have measurement error), + 2. Observed factor values at each period (no measurement error, + loading fixed at 1, intercept free), + 3. Controls (time-invariant, no measurement error). + + The layout records which slot in the stacked vector corresponds to + which conceptual quantity, so Stage 2 can map the fitted Pi/Psi back + onto the structural Lambda/A/Sigma/mu/Omega. + """ + + columns: tuple[str, ...] + """Human-readable label per augmented-vector column.""" + + measurement_slots: tuple[int, ...] + """Indices of slots that correspond to factor measurements (with + measurement error). One per (period, measurement) update.""" + + observed_factor_slots: tuple[int, ...] + """Indices of slots that correspond to observed factor values (no + measurement error). One per (period, observed factor).""" + + control_slots: tuple[int, ...] + """Indices of slots that correspond to controls (no measurement + error).""" + + measurement_meta: tuple[tuple[int, str, str], ...] + """For each measurement slot: (period, factor_name, measurement_name).""" + + observed_factor_meta: tuple[tuple[int, str], ...] + """For each observed-factor slot: (period, observed_factor_name).""" + + control_meta: tuple[str, ...] + """Control name for each control slot.""" + + +@dataclass(frozen=True) +class MixtureFitResult: + """Output of Stage 1: reduced-form mixture parameters. + + The fitted distribution is + ``sum_k weights[k] * Normal(means[k], covariances[k])`` on the + augmented measure vector. Matches AMN eq. (11)-(14). 
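+
+    If overall moments are ever needed, they follow the standard mixture
+    identities ``mean = weights @ means`` and
+    ``cov = sum_k weights[k] * (covariances[k] + outer(d_k, d_k))`` with
+    ``d_k = means[k] - mean``.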
+ """ + + weights: np.ndarray + """Mixture weights, shape ``(n_components,)``.""" + + means: np.ndarray + """Per-component mean vectors, shape ``(n_components, n_aug)``.""" + + covariances: np.ndarray + """Per-component covariance matrices, shape + ``(n_components, n_aug, n_aug)``.""" + + loglikelihood: float + """Final EM log-likelihood (summed across observations).""" + + n_iter: int + """EM iterations run by the best restart.""" + + converged: bool + """Whether the best restart converged within `em_tol`.""" + + layout: AugmentedMeasureLayout + """Slot bookkeeping for the augmented measure vector this mixture was + fit on.""" + + +@dataclass(frozen=True) +class MinimumDistanceResult: + """Output of Stage 2: structural parameters from the reduced-form mixture. + + All arrays are in the standard skillmodels ordering established by + `process_model.process_model`. + """ + + loadings: pd.DataFrame + """Recovered factor loadings, MultiIndexed by (period, measurement, + factor).""" + + measurement_intercepts: pd.DataFrame + """Recovered measurement intercepts, MultiIndexed by (period, + measurement, control).""" + + measurement_sds: pd.DataFrame + """Recovered measurement-error SDs, MultiIndexed by (period, + measurement).""" + + factor_mixture_means: np.ndarray + """Per-component means of the latent factors stacked across periods, + shape ``(n_components, n_factor_period_slots)``.""" + + factor_mixture_covariances: np.ndarray + """Per-component covariances of the same stacked factor vector, shape + ``(n_components, n_factor_period_slots, n_factor_period_slots)``.""" + + factor_period_slots: tuple[tuple[int, str], ...] + """Ordered ``(period, factor_name)`` for the + ``factor_mixture_*`` arrays.""" + + objective_value: float + """Minimum-distance criterion at the optimum.""" + + success: bool + """Whether the Stage-2 optimization converged.""" + + +@dataclass(frozen=True) +class ProductionFitResult: + """Output of Stage 3: production-function and investment-equation params. + + Fitted by regression on a simulated latent-factor panel; see AMN 2020 + eqs. 4-5, 7-8. + """ + + production_params: pd.DataFrame + """Production-function parameters, in the standard skillmodels + params-DataFrame format (4-level MultiIndex).""" + + investment_params: pd.DataFrame + """Investment-equation parameters (eq. 7), 4-level MultiIndex. 
Empty + if the model has no endogenous factors.""" + + n_draws: int + """Number of simulated latent-factor trajectories used.""" + + seed: int + """RNG seed used for the simulation.""" + + +@dataclass(frozen=True) +class AMNStageResults: + """Container for the three stages' intermediate outputs.""" + + mixture: MixtureFitResult + """Stage 1 reduced-form mixture fit.""" + + structural: MinimumDistanceResult + """Stage 2 structural recovery.""" + + production: ProductionFitResult + """Stage 3 production-function regression.""" + + +@dataclass(frozen=True) +class AMNEstimationResult: + """Complete result from AMN estimation.""" + + model_spec: ModelSpec + """The ModelSpec used for estimation.""" + + stages: AMNStageResults + """Per-stage intermediate outputs.""" + + all_params: pd.DataFrame + """Combined parameters across stages, in the standard 4-level + MultiIndex (category, period, name1, name2) format consumed by every + other skillmodels entry point.""" + + success: bool + """AND across stage convergence flags.""" + + synthetic_panel: pd.DataFrame | None = None + """Stage-3 simulated factor panel, kept iff + `AMNEstimationOptions.keep_synthetic_panel` is True.""" + + +@dataclass(frozen=True) +class AMNInferenceResult: + """Cluster-bootstrap standard errors and covariance for AMN params.""" + + standard_errors: pd.Series + """std across replicate_params, indexed by the params MultiIndex.""" + + vcov: pd.DataFrame + """cov(replicate_params), MultiIndexed on both axes.""" + + replicate_params: pd.DataFrame + """One row per bootstrap replicate, columns = params MultiIndex.""" + + n_clusters: int + """Caseids resampled per replicate.""" + + n_boot: int + """Number of bootstrap replicates.""" diff --git a/src/skillmodels/chs/__init__.py b/src/skillmodels/chs/__init__.py new file mode 100644 index 00000000..d80cae8a --- /dev/null +++ b/src/skillmodels/chs/__init__.py @@ -0,0 +1,34 @@ +"""CHS (Cunha-Heckman-Schennach 2010) Kalman-filter MLE estimator. + +This subpackage holds the state-space machinery that powers the +default skillmodels estimator: + +* `kalman_filters` — square-root unscented and extended Kalman filter + predict/update steps. +* `likelihood` (`+ `_debug`) — Kalman-filter log-likelihood. +* `maximization_inputs` — `get_maximization_inputs()`, the canonical + entry point that bundles likelihood / gradients / constraints / + params template for `optimagic.maximize`. +* `filtered_states` — `get_filtered_states()` post-estimation helper. +* `process_debug_data` — Kalman-debug-output post-processing. +* `qr`, `clipping` — numerical helpers (square-root QR, soft clipping + for UKF stability). + +The public top-level package re-exports the user-facing entry points +(`get_maximization_inputs`, `get_filtered_states`, `create_state_ranges`) +so most callers don't need to touch the `chs.` prefix. 
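+
+Example (illustrative; `model_spec` and `data` are whatever the caller
+already has):
+
+    from skillmodels import get_maximization_inputs
+
+    inputs = get_maximization_inputs(model_spec=model_spec, data=data)
+    # same object as skillmodels.chs.maximization_inputs.get_maximization_inputs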
+""" + +from skillmodels.chs.filtered_states import get_filtered_states +from skillmodels.chs.maximization_inputs import get_maximization_inputs +from skillmodels.chs.process_debug_data import ( + create_state_ranges, + process_debug_data, +) + +__all__ = [ + "create_state_ranges", + "get_filtered_states", + "get_maximization_inputs", + "process_debug_data", +] diff --git a/src/skillmodels/clipping.py b/src/skillmodels/chs/clipping.py similarity index 100% rename from src/skillmodels/clipping.py rename to src/skillmodels/chs/clipping.py diff --git a/src/skillmodels/filtered_states.py b/src/skillmodels/chs/filtered_states.py similarity index 66% rename from src/skillmodels/filtered_states.py rename to src/skillmodels/chs/filtered_states.py index c525ac0e..3bf66aff 100644 --- a/src/skillmodels/filtered_states.py +++ b/src/skillmodels/chs/filtered_states.py @@ -1,25 +1,79 @@ """Functions to compute and process filtered latent states.""" -from typing import Any +from typing import TYPE_CHECKING, Any import jax.numpy as jnp import numpy as np import pandas as pd -from skillmodels.maximization_inputs import get_maximization_inputs -from skillmodels.model_spec import ModelSpec -from skillmodels.params_index import get_params_index -from skillmodels.parse_params import create_parsing_info, parse_params -from skillmodels.process_debug_data import create_state_ranges -from skillmodels.process_model import process_model +from skillmodels.chs.maximization_inputs import get_maximization_inputs +from skillmodels.chs.process_debug_data import create_state_ranges +from skillmodels.common.model_spec import ModelSpec +from skillmodels.common.params_index import get_params_index +from skillmodels.common.parse_params import create_parsing_info, parse_params +from skillmodels.common.process_model import process_model + +if TYPE_CHECKING: + from skillmodels.af.types import AFEstimationResult + from skillmodels.amn.types import AMNEstimationResult def get_filtered_states( model_spec: ModelSpec, data: pd.DataFrame, params: pd.DataFrame, + af_result: AFEstimationResult | None = None, + amn_result: AMNEstimationResult | None = None, ) -> dict[str, dict[str, Any]]: - """Compute filtered latent states given data and estimated parameters.""" + """Compute latent state estimates given data and estimated parameters. + + For CHS (Kalman filter) estimation, computes filtered states via the + debug likelihood. For AF estimation, computes posterior means via + Halton quadrature. For AMN estimation, computes mixture-Schur + conditional posteriors of the latent factors given the augmented + measure vector. + + Args: + model_spec: Model specification. + data: Dataset in long format with MultiIndex (id, period). + params: Estimated parameter DataFrame. + af_result: If provided, use AF posterior computation instead of + CHS Kalman filtering. + amn_result: If provided, use AMN mixture-Schur posteriors + instead. Only one of `af_result` and `amn_result` may be + set. + + Return: + Dict with "unanchored_states" (always present) and + "anchored_states" (CHS only), each containing "states" + DataFrame and "state_ranges". + + """ + if af_result is not None and amn_result is not None: + msg = "Pass only one of af_result / amn_result." 
+ raise ValueError(msg) + + if af_result is not None: + from skillmodels.af.posterior_states import ( # noqa: PLC0415 + get_af_posterior_states, + ) + + return get_af_posterior_states( + af_result=af_result, + model_spec=model_spec, + data=data, + ) + + if amn_result is not None: + from skillmodels.amn.posterior_states import ( # noqa: PLC0415 + get_amn_posterior_states, + ) + + return get_amn_posterior_states( + amn_result=amn_result, + data=data, + ) + max_inputs = get_maximization_inputs(model_spec=model_spec, data=data) params = params.loc[max_inputs["params_template"].index] debug_loglike = max_inputs["debug_loglike"] diff --git a/src/skillmodels/kalman_filters.py b/src/skillmodels/chs/kalman_filters.py similarity index 99% rename from src/skillmodels/kalman_filters.py rename to src/skillmodels/chs/kalman_filters.py index 7f9549f7..996223b8 100644 --- a/src/skillmodels/kalman_filters.py +++ b/src/skillmodels/chs/kalman_filters.py @@ -6,7 +6,7 @@ import jax.numpy as jnp from jax import Array -from skillmodels.qr import qr_gpu +from skillmodels.chs.qr import qr_gpu LINEAR_FUNCTION_NAMES = frozenset({"linear", "constant"}) diff --git a/src/skillmodels/kalman_filters_debug.py b/src/skillmodels/chs/kalman_filters_debug.py similarity index 100% rename from src/skillmodels/kalman_filters_debug.py rename to src/skillmodels/chs/kalman_filters_debug.py diff --git a/src/skillmodels/likelihood_function.py b/src/skillmodels/chs/likelihood.py similarity index 98% rename from src/skillmodels/likelihood_function.py rename to src/skillmodels/chs/likelihood.py index ee89f016..5ff7c5dc 100644 --- a/src/skillmodels/likelihood_function.py +++ b/src/skillmodels/chs/likelihood.py @@ -8,10 +8,10 @@ import jax.numpy as jnp from jax import Array -from skillmodels.clipping import soft_clipping -from skillmodels.kalman_filters import kalman_update -from skillmodels.parse_params import parse_params -from skillmodels.types import ( +from skillmodels.chs.clipping import soft_clipping +from skillmodels.chs.kalman_filters import kalman_update +from skillmodels.common.parse_params import parse_params +from skillmodels.common.types import ( Dimensions, EstimationOptions, Labels, diff --git a/src/skillmodels/likelihood_function_debug.py b/src/skillmodels/chs/likelihood_debug.py similarity index 97% rename from src/skillmodels/likelihood_function_debug.py rename to src/skillmodels/chs/likelihood_debug.py index 6391695a..8960e474 100644 --- a/src/skillmodels/likelihood_function_debug.py +++ b/src/skillmodels/chs/likelihood_debug.py @@ -8,10 +8,10 @@ import jax.numpy as jnp from jax import Array -from skillmodels.clipping import soft_clipping -from skillmodels.kalman_filters_debug import kalman_update -from skillmodels.parse_params import parse_params -from skillmodels.types import ( +from skillmodels.chs.clipping import soft_clipping +from skillmodels.chs.kalman_filters_debug import kalman_update +from skillmodels.common.parse_params import parse_params +from skillmodels.common.types import ( Dimensions, EstimationOptions, Labels, diff --git a/src/skillmodels/maximization_inputs.py b/src/skillmodels/chs/maximization_inputs.py similarity index 74% rename from src/skillmodels/maximization_inputs.py rename to src/skillmodels/chs/maximization_inputs.py index a01c55cd..c85948a2 100644 --- a/src/skillmodels/maximization_inputs.py +++ b/src/skillmodels/chs/maximization_inputs.py @@ -11,34 +11,38 @@ from jax import Array from numpy.typing import NDArray -import skillmodels.likelihood_function as lf -import 
skillmodels.likelihood_function_debug as lfd -from skillmodels.constraints import ( - add_bounds, - enforce_fixed_constraints, - get_constraints, -) -from skillmodels.kalman_filters import ( +import skillmodels.chs.likelihood as lf +import skillmodels.chs.likelihood_debug as lfd +from skillmodels.amn.estimate import estimate_amn +from skillmodels.amn.start_values import get_spearman_start_params +from skillmodels.chs.kalman_filters import ( calculate_sigma_scaling_factor_and_weights, is_all_linear, kalman_predict, linear_kalman_predict, ) -from skillmodels.model_spec import ModelSpec -from skillmodels.params_index import get_params_index -from skillmodels.parse_params import create_parsing_info -from skillmodels.process_data import process_data -from skillmodels.process_debug_data import process_debug_data -from skillmodels.process_model import process_model -from skillmodels.types import ParsingInfo, ProcessedModel +from skillmodels.chs.process_debug_data import process_debug_data +from skillmodels.common.constraints import ( + FixedConstraintWithValue, + add_bounds, + enforce_fixed_constraints, + get_constraints, +) +from skillmodels.common.model_spec import ModelSpec +from skillmodels.common.params_index import get_params_index +from skillmodels.common.parse_params import create_parsing_info +from skillmodels.common.process_data import process_data +from skillmodels.common.process_model import process_model +from skillmodels.common.types import ParsingInfo, ProcessedModel jax.config.update("jax_enable_x64", True) # noqa: FBT003 -def get_maximization_inputs( +def get_maximization_inputs( # noqa: C901, PLR0915 model_spec: ModelSpec, data: pd.DataFrame, split_dataset: int = 1, + fixed_params: pd.DataFrame | None = None, ) -> dict[str, Any]: """Create inputs for optimagic's maximize function. @@ -47,6 +51,16 @@ def get_maximization_inputs( data: Dataset in long format. split_dataset: Controls into how many slices to split the dataset during the gradient computation. + fixed_params: Optional DataFrame with a ``"value"`` column pinning + specified parameters to fixed values. Uses the same 4-level + MultiIndex as the returned ``params_template``. Each matching + entry becomes a `FixedConstraintWithValue` in the returned + constraints list, so optimagic holds the parameter at the given + value during optimization. When a fix overlaps a + `ProbabilityConstraint` selector (e.g., a gamma of a ``log_ces`` + transition), optimagic's fold machinery keeps the remaining free + entries on the implied simplex (see + ``optimagic.ProbabilityConstraint``). Returns a dictionary with keys: loglike: A jax jitted function that takes an optimagic-style @@ -62,7 +76,7 @@ def get_maximization_inputs( loglike_and_gradient: Combination of loglike and loglike_gradient that is faster than calling the two functions separately. constraints: List of optimagic constraints that are implied by the - model specification. + model specification, extended by any user-supplied ``fixed_params``. params_template: Parameter DataFrame with correct index and bounds. The value column is empty except for the fixed constraints, which are set including the bounds. 
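+            Example (index tuple illustrative): the returned template is a
+            convenient basis for a ``fixed_params`` frame in a follow-up call:
+
+                fixed = inputs["params_template"].loc[
+                    [("transition", 0, "fac1", "constant")]
+                ].assign(value=0.0)
+                inputs = get_maximization_inputs(model_spec, data, fixed_params=fixed)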
@@ -178,6 +192,12 @@ def debug_loglike(params: pd.DataFrame) -> dict[str, Any]: endogenous_factors_info=processed_model.endogenous_factors_info, ) + if fixed_params is not None: + fixed_constraints = _build_fixed_constraints_from_params( + fixed_params, params_index=p_index + ) + constraints = list(constraints) + fixed_constraints + params_template = pd.DataFrame(columns=["value"], index=p_index) params_template = add_bounds( params=params_template, @@ -189,6 +209,33 @@ def debug_loglike(params: pd.DataFrame) -> dict[str, Any]: ) if not params_template.index.equals(p_index): raise ValueError("params_template index is not equal to p_index") + + strategy = processed_model.estimation_options.start_params_strategy + if strategy == "spearman": + params_template = get_spearman_start_params( + model_spec=model_spec, + data=data, + params_template=params_template, + ) + elif strategy == "amn": + amn_result = estimate_amn(model_spec=model_spec, data=data) + # First fill template via Spearman for entries AMN doesn't touch + # (mixture weights, initial Cholesky diagonals not directly + # produced by AMN's three stages); then overlay AMN values onto + # the common index. Skip indices pre-pinned by + # `enforce_fixed_constraints`. + pre_pinned = params_template["value"].notna() + params_template = get_spearman_start_params( + model_spec=model_spec, + data=data, + params_template=params_template, + ) + common = amn_result.all_params.index.intersection(params_template.index) + free_common = common[~pre_pinned.reindex(common, fill_value=False)] + params_template.loc[free_common, "value"] = amn_result.all_params.loc[ + free_common, "value" + ] + return { "loglike": loglike, "loglikeobs": loglikeobs, @@ -199,6 +246,27 @@ def debug_loglike(params: pd.DataFrame) -> dict[str, Any]: } +def _build_fixed_constraints_from_params( + fixed_params: pd.DataFrame, + params_index: pd.MultiIndex, +) -> list[FixedConstraintWithValue]: + """Convert a user-provided ``fixed_params`` DataFrame into constraints. + + Each matching row becomes a ``FixedConstraintWithValue`` so optimagic + can treat user fixes uniformly with model-implied fixes (normalisations, + anchoring, augmented periods, ...). Entries whose index is not in + ``params_index`` are ignored. + """ + common = params_index.intersection(fixed_params.index) + return [ + FixedConstraintWithValue( + loc=idx, + value=float(fixed_params.loc[idx, "value"]), + ) + for idx in common + ] + + def _partial_some_log_likelihood( fun: Callable, parsing_info: ParsingInfo, diff --git a/src/skillmodels/process_debug_data.py b/src/skillmodels/chs/process_debug_data.py similarity index 99% rename from src/skillmodels/process_debug_data.py rename to src/skillmodels/chs/process_debug_data.py index 770317d5..41c977f7 100644 --- a/src/skillmodels/process_debug_data.py +++ b/src/skillmodels/chs/process_debug_data.py @@ -7,7 +7,7 @@ from jax import Array from numpy.typing import NDArray -from skillmodels.types import ProcessedModel +from skillmodels.common.types import ProcessedModel def process_debug_data( diff --git a/src/skillmodels/qr.py b/src/skillmodels/chs/qr.py similarity index 100% rename from src/skillmodels/qr.py rename to src/skillmodels/chs/qr.py diff --git a/src/skillmodels/common/__init__.py b/src/skillmodels/common/__init__.py new file mode 100644 index 00000000..e56ee676 --- /dev/null +++ b/src/skillmodels/common/__init__.py @@ -0,0 +1,14 @@ +"""Estimator-agnostic infrastructure shared by CHS, AF, and AMN. 
+ +This subpackage holds everything that the three estimator subpackages +build on but do not own: the user-facing model specification +(`ModelSpec`, `FactorSpec`, `AnchoringSpec`), the data and parameter +processing pipeline (`process_model`, `process_data`, `params_index`, +`parse_params`), the constraint plumbing (`constraints`, +`decorators`), shared transition-function library, and the +visualisation helpers that operate on the common filtered-states +DataFrame format. + +The dependency rule for this package: it imports from no estimator +subpackage. Conversely, `chs`, `af`, and `amn` import freely from here. +""" diff --git a/src/skillmodels/check_model.py b/src/skillmodels/common/check_model.py similarity index 98% rename from src/skillmodels/check_model.py rename to src/skillmodels/common/check_model.py index 18f85af7..e6c94c93 100644 --- a/src/skillmodels/check_model.py +++ b/src/skillmodels/common/check_model.py @@ -4,8 +4,8 @@ import numpy as np -from skillmodels.model_spec import ModelSpec -from skillmodels.types import Anchoring, Dimensions, Labels +from skillmodels.common.model_spec import ModelSpec +from skillmodels.common.types import Anchoring, Dimensions, Labels def check_model( diff --git a/src/skillmodels/common/config.py b/src/skillmodels/common/config.py new file mode 100644 index 00000000..c135b749 --- /dev/null +++ b/src/skillmodels/common/config.py @@ -0,0 +1,10 @@ +"""Configuration constants and paths for skillmodels.""" + +from pathlib import Path + +# `__file__` lives in src/skillmodels/common/config.py; test_data sits in +# src/skillmodels/test_data so resolve one level up. +TEST_DATA_DIR = Path(__file__).resolve().parent.parent / "test_data" +REGRESSION_VAULT = ( + Path(__file__).resolve().parent.parent.parent.parent / "tests" / "regression_vault" +) diff --git a/src/skillmodels/constraints.py b/src/skillmodels/common/constraints.py similarity index 95% rename from src/skillmodels/constraints.py rename to src/skillmodels/common/constraints.py index 4285ac0e..e403f73d 100644 --- a/src/skillmodels/constraints.py +++ b/src/skillmodels/common/constraints.py @@ -10,8 +10,8 @@ import optimagic as om import pandas as pd -import skillmodels.transition_functions as t_f_module -from skillmodels.types import ( +import skillmodels.common.transition_functions as t_f_module +from skillmodels.common.types import ( Anchoring, Dimensions, EndogenousFactorsInfo, @@ -429,7 +429,16 @@ def _get_constraints_for_augmented_periods( for k, v in aug_period_meas_types.items() if v == aug_period_meas_type_to_constrain ] - for aug_period in aug_periods_to_constrain: + # The last entry of `aug_periods_to_constrain` is the aug-period + # half of the last calendar period for this factor's meas-type. + # `get_transition_index_tuples` stops at `aug_periods[:-2]` when + # endogenous factors are present (or `[:-1]` otherwise), so the + # params index has no transition entries at that final aug-period + # for any factor. Emitting identity constraints there would target + # locs that don't exist and trip the optimagic selector. The + # shock-sds loop below already uses `[:-1]` for the same reason + # — keep them symmetric. 
+ for aug_period in aug_periods_to_constrain[:-1]: if func := getattr(t_f_module, f"identity_constraints_{tname}", False): constraints += func( # ty: ignore[call-non-callable] factor=factor, diff --git a/src/skillmodels/correlation_heatmap.py b/src/skillmodels/common/correlation_heatmap.py similarity index 99% rename from src/skillmodels/correlation_heatmap.py rename to src/skillmodels/common/correlation_heatmap.py index be0f6aff..e8642312 100644 --- a/src/skillmodels/correlation_heatmap.py +++ b/src/skillmodels/common/correlation_heatmap.py @@ -7,10 +7,10 @@ from numpy.typing import NDArray from plotly import graph_objects as go -from skillmodels.model_spec import ModelSpec -from skillmodels.process_data import pre_process_data -from skillmodels.process_model import process_model -from skillmodels.types import ProcessedModel +from skillmodels.common.model_spec import ModelSpec +from skillmodels.common.process_data import pre_process_data +from skillmodels.common.process_model import process_model +from skillmodels.common.types import ProcessedModel def plot_correlation_heatmap( diff --git a/src/skillmodels/decorators.py b/src/skillmodels/common/decorators.py similarity index 100% rename from src/skillmodels/decorators.py rename to src/skillmodels/common/decorators.py diff --git a/src/skillmodels/diagnostic_plots.py b/src/skillmodels/common/diagnostic_plots.py similarity index 97% rename from src/skillmodels/diagnostic_plots.py rename to src/skillmodels/common/diagnostic_plots.py index 21d45634..c6d71c3f 100644 --- a/src/skillmodels/diagnostic_plots.py +++ b/src/skillmodels/common/diagnostic_plots.py @@ -6,9 +6,9 @@ import pandas as pd import plotly.graph_objects as go -from skillmodels.maximization_inputs import get_maximization_inputs -from skillmodels.model_spec import ModelSpec -from skillmodels.process_model import process_model +from skillmodels.chs.maximization_inputs import get_maximization_inputs +from skillmodels.common.model_spec import ModelSpec +from skillmodels.common.process_model import process_model def plot_residual_boxplots( diff --git a/src/skillmodels/model_spec.py b/src/skillmodels/common/model_spec.py similarity index 88% rename from src/skillmodels/model_spec.py rename to src/skillmodels/common/model_spec.py index 9959c2cd..7ddc6796 100644 --- a/src/skillmodels/model_spec.py +++ b/src/skillmodels/common/model_spec.py @@ -11,7 +11,7 @@ from types import MappingProxyType from typing import Any, Self -from skillmodels.types import ( +from skillmodels.common.types import ( EstimationOptions, Normalizations, ensure_containers_are_immutable, @@ -32,6 +32,27 @@ class FactorSpec: """Whether this factor is a correction factor.""" transition_function: str | Callable | None = None """Transition function name (e.g. `"linear"`, `"log_ces"`) or a callable.""" + has_production_shock: bool = True + """Whether transitions add a stochastic shock for this factor. + + When `False`, the AF transition integrates the factor deterministically: + no shock SD parameter, no shock dimension in the joint Halton draw, and + the transition output is used as-is. Set this to `False` for + time-invariant factors (combined with an identity transition pinned via + `fixed_params`) to cut integration dimensionality. + """ + has_initial_distribution: bool = True + """Whether this factor is drawn from the AF period-0 mixture distribution. 
+ + When `False`, the factor is not included in the initial joint mixture + (no mean / Cholesky entries for it) and is instead reconstructed + deterministically per Halton draw. Currently only supported in + conjunction with `is_endogenous=True`: the factor's period-0 value is + computed from its investment equation at period 0 plus an investment + shock, with investment-equation and shock parameters estimated as part + of the initial step. The transition function must not depend on the + factor's own lag. + """ def with_transition_function(self, func: str | Callable) -> Self: """Return a new FactorSpec with the given transition function.""" @@ -129,6 +150,8 @@ def from_dict(cls, d: dict[str, Any]) -> Self: is_endogenous=spec.get("is_endogenous", False), is_correction=spec.get("is_correction", False), transition_function=spec.get("transition_function"), + has_production_shock=spec.get("has_production_shock", True), + has_initial_distribution=spec.get("has_initial_distribution", True), ) anchoring = None diff --git a/src/skillmodels/params_index.py b/src/skillmodels/common/params_index.py similarity index 99% rename from src/skillmodels/params_index.py rename to src/skillmodels/common/params_index.py index c3a19587..ce0491ac 100644 --- a/src/skillmodels/params_index.py +++ b/src/skillmodels/common/params_index.py @@ -2,7 +2,7 @@ import pandas as pd -from skillmodels.types import ( +from skillmodels.common.types import ( Dimensions, EndogenousFactorsInfo, Labels, diff --git a/src/skillmodels/parse_params.py b/src/skillmodels/common/parse_params.py similarity index 99% rename from src/skillmodels/parse_params.py rename to src/skillmodels/common/parse_params.py index 21e06a5e..70a0f0c1 100644 --- a/src/skillmodels/parse_params.py +++ b/src/skillmodels/common/parse_params.py @@ -8,7 +8,7 @@ import pandas as pd from jax import Array -from skillmodels.types import ( +from skillmodels.common.types import ( Anchoring, Dimensions, Labels, diff --git a/src/skillmodels/process_data.py b/src/skillmodels/common/process_data.py similarity index 99% rename from src/skillmodels/process_data.py rename to src/skillmodels/common/process_data.py index 72f731fe..8e3f48de 100644 --- a/src/skillmodels/process_data.py +++ b/src/skillmodels/common/process_data.py @@ -8,7 +8,7 @@ import pandas as pd from jax import Array -from skillmodels.types import Anchoring, Labels +from skillmodels.common.types import Anchoring, Labels def process_data( diff --git a/src/skillmodels/process_model.py b/src/skillmodels/common/process_model.py similarity index 98% rename from src/skillmodels/process_model.py rename to src/skillmodels/common/process_model.py index 43b5aedc..8659d489 100644 --- a/src/skillmodels/process_model.py +++ b/src/skillmodels/common/process_model.py @@ -12,11 +12,11 @@ from jax import Array, vmap from pandas import DataFrame -import skillmodels.transition_functions as t_f_module -from skillmodels.check_model import check_model, check_stagemap -from skillmodels.decorators import extract_params, jax_array_output -from skillmodels.model_spec import FactorSpec, ModelSpec -from skillmodels.types import ( +import skillmodels.common.transition_functions as t_f_module +from skillmodels.common.check_model import check_model, check_stagemap +from skillmodels.common.decorators import extract_params, jax_array_output +from skillmodels.common.model_spec import FactorSpec, ModelSpec +from skillmodels.common.types import ( Anchoring, Dimensions, EndogenousFactorsInfo, diff --git a/src/skillmodels/simulate_data.py 
b/src/skillmodels/common/simulate_data.py similarity index 97% rename from src/skillmodels/simulate_data.py rename to src/skillmodels/common/simulate_data.py index 13b9c005..cf120e82 100644 --- a/src/skillmodels/simulate_data.py +++ b/src/skillmodels/common/simulate_data.py @@ -9,15 +9,15 @@ from jax import Array from numpy.typing import NDArray -from skillmodels.filtered_states import anchor_states_df -from skillmodels.kalman_filters import transform_sigma_points -from skillmodels.model_spec import ModelSpec -from skillmodels.params_index import get_params_index -from skillmodels.parse_params import create_parsing_info, parse_params -from skillmodels.process_data import process_data -from skillmodels.process_debug_data import create_state_ranges -from skillmodels.process_model import process_model -from skillmodels.types import ( +from skillmodels.chs.filtered_states import anchor_states_df +from skillmodels.chs.kalman_filters import transform_sigma_points +from skillmodels.chs.process_debug_data import create_state_ranges +from skillmodels.common.model_spec import ModelSpec +from skillmodels.common.params_index import get_params_index +from skillmodels.common.parse_params import create_parsing_info, parse_params +from skillmodels.common.process_data import process_data +from skillmodels.common.process_model import process_model +from skillmodels.common.types import ( Dimensions, EndogenousFactorsInfo, Labels, diff --git a/src/skillmodels/transition_functions.py b/src/skillmodels/common/transition_functions.py similarity index 73% rename from src/skillmodels/transition_functions.py rename to src/skillmodels/common/transition_functions.py index 9e384614..00a13525 100644 --- a/src/skillmodels/transition_functions.py +++ b/src/skillmodels/common/transition_functions.py @@ -37,7 +37,7 @@ from jax import Array if TYPE_CHECKING: - from skillmodels.constraints import FixedConstraintWithValue + from skillmodels.common.constraints import FixedConstraintWithValue def select_by_loc(params: Any, loc: Any) -> Any: # noqa: ANN401 @@ -63,7 +63,7 @@ def identity_constraints_linear( all_factors: tuple[str, ...], ) -> list[FixedConstraintWithValue]: """Identity constraints for linear transition function.""" - from skillmodels.constraints import FixedConstraintWithValue # noqa: PLC0415 + from skillmodels.common.constraints import FixedConstraintWithValue # noqa: PLC0415 constraints: list[FixedConstraintWithValue] = [] for regressor in params_linear(all_factors): @@ -111,7 +111,7 @@ def identity_constraints_translog( all_factors: tuple[str, ...], ) -> list[FixedConstraintWithValue]: """Identity constraints for translog transition function.""" - from skillmodels.constraints import FixedConstraintWithValue # noqa: PLC0415 + from skillmodels.common.constraints import FixedConstraintWithValue # noqa: PLC0415 constraints: list[FixedConstraintWithValue] = [] for regressor in params_translog(all_factors): @@ -122,17 +122,22 @@ def identity_constraints_translog( def log_ces(states: Array, params: Array) -> Array: - """Log CES production function (KLS version).""" + """Log CES production function (KLS version). + + Computed as ``log(sum_i gamma_i * exp(states_i * phi)) / phi`` via a + numerically stable weighted logsumexp. The weighted form keeps both the + forward pass and the gradient finite when some ``gamma_i = 0``; the + naive ``logsumexp(log(gamma) + states * phi)`` has a 1 / gamma term in + the gradient that produces NaN at ``gamma_i = 0``. 
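+
+    Worked example (numbers illustrative): with ``gammas = (0.0, 1.0)``,
+    ``states = (2.0, 1.0)`` and ``phi = -0.5`` the weighted form evaluates
+    ``log(0 * exp(-1.0) + 1 * exp(-0.5)) / (-0.5) = 1.0`` (the second
+    state) without ever forming ``log(0)``; the naive form survives the
+    forward pass but its gradient w.r.t. the zero gamma is NaN.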
+ """ phi = params[-1] gammas = params[:-1] scaling_factor = 1 / phi - # note: once the b argument is supported in jax.scipy.special.logsumexp, we can set - # b = gammas instead of adding the log of gammas to sigma_points * phi - - # the log step for gammas underflows for gamma = 0, but this is handled correctly - # by logsumexp and does not raise a warning. - unscaled = jax.scipy.special.logsumexp(jnp.log(gammas) + states * phi) + exponents = states * phi + max_exp = jnp.max(exponents) + shifted = jnp.exp(exponents - max_exp) + unscaled = max_exp + jnp.log(jnp.sum(gammas * shifted)) return unscaled * scaling_factor @@ -161,6 +166,58 @@ def identity_constraints_log_ces( raise NotImplementedError +def log_ces_with_constant(states: Array, params: Array) -> Array: + """Log CES production function with an additive level constant. + + Computed as ``A + (1/phi) * log(sum_i gamma_i * exp(states_i * phi))``, + matching MATLAB's AF reference parametrisation + ``log_skills_{t+1} = log(A_t) + (1/sigma) log(sum gamma_i theta_i^sigma)``. + + The plain ``log_ces`` lacks the constant ``A``, which forces models with + a non-trivial ``A`` (e.g. AF Sec. 5.1's CES sims with ``A = e``) to + absorb the level shift into the next-period skills measurement + intercepts. When matching the MATLAB sim parametrisation exactly + (all skill intercepts pinned to 0, ``A_t`` free per period), use + this variant instead. + """ + constant_term = params[-1] + phi = params[-2] + gammas = params[:-2] + scaling_factor = 1 / phi + + exponents = states * phi + max_exp = jnp.max(exponents) + shifted = jnp.exp(exponents - max_exp) + unscaled = max_exp + jnp.log(jnp.sum(gammas * shifted)) + return constant_term + unscaled * scaling_factor + + +def params_log_ces_with_constant(factors: tuple[str, ...]) -> list[str]: + """Index tuples for ``log_ces_with_constant``.""" + return [*factors, "phi", "constant"] + + +def constraints_log_ces_with_constant( + factor: str, + factors: tuple[str, ...], + aug_period: int, +) -> om.constraints.Constraint: + """Constraints for ``log_ces_with_constant`` (gammas on the simplex).""" + names = params_log_ces_with_constant(factors) + # Gammas are everything except the last two entries (phi and constant). 
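+    # e.g. for factors ("fac1", "fac2") (names illustrative) the param names
+    # are ["fac1", "fac2", "phi", "constant"], so names[:-2] picks exactly
+    # the two gamma entries that must lie on the simplex.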
+ loc = [("transition", aug_period, factor, name) for name in names[:-2]] + return om.ProbabilityConstraint(selector=functools.partial(select_by_loc, loc=loc)) + + +def identity_constraints_log_ces_with_constant( + factors: tuple[str, ...], + aug_period: int, + all_factors: tuple[str, ...], +) -> list[om.constraints.Constraint]: + """Identity constraints for ``log_ces_with_constant``.""" + raise NotImplementedError + + def constant(state: Array, params: Array) -> Array: # noqa: ARG001 """Constant production function.""" return state @@ -227,7 +284,7 @@ def identity_constraints_linear_and_squares( all_factors: tuple[str, ...], ) -> list[FixedConstraintWithValue]: """Identity constraints for linear_and_squares transition function.""" - from skillmodels.constraints import FixedConstraintWithValue # noqa: PLC0415 + from skillmodels.common.constraints import FixedConstraintWithValue # noqa: PLC0415 constraints: list[FixedConstraintWithValue] = [] for regressor in params_linear_and_squares(all_factors): diff --git a/src/skillmodels/types.py b/src/skillmodels/common/types.py similarity index 96% rename from src/skillmodels/types.py rename to src/skillmodels/common/types.py index 05c8a795..16564a9e 100644 --- a/src/skillmodels/types.py +++ b/src/skillmodels/common/types.py @@ -5,7 +5,7 @@ from dataclasses import dataclass, field from enum import Enum, auto from types import MappingProxyType -from typing import Any, NewType, cast +from typing import Any, Literal, NewType, cast import pandas as pd from jax import Array @@ -216,6 +216,16 @@ class EstimationOptions: """Hardness of lower clipping.""" clipping_upper_hardness: float = 1 """Hardness of upper clipping.""" + start_params_strategy: Literal["none", "spearman", "amn"] = "amn" + """How to populate the `value` column of the `params_template`. + + `"amn"` (default) runs the full Attanasio-Meghir-Nix (2020) + three-stage estimator and uses its parameter estimates as starting + values for the downstream MLE. `"spearman"` seeds free entries + from Spearman cross-covariance / Bartlett-OLS moments only (fast + but less accurate on non-Gaussian factor distributions). `"none"` + leaves free entries as `NaN` so the caller can fill them. 
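+
+    The strategy is read by `get_maximization_inputs` when it fills the
+    `params_template`; `"spearman"` is a reasonable choice for quick
+    exploratory runs where the full AMN pass is too slow.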
+ """ def __post_init__(self) -> None: # noqa: D105 if not self.robust_bounds: diff --git a/src/skillmodels/utilities.py b/src/skillmodels/common/utilities.py similarity index 98% rename from src/skillmodels/utilities.py rename to src/skillmodels/common/utilities.py index 9b69945f..9fcb9894 100644 --- a/src/skillmodels/utilities.py +++ b/src/skillmodels/common/utilities.py @@ -6,13 +6,13 @@ import numpy as np import pandas as pd -from skillmodels.model_spec import ( +from skillmodels.common.model_spec import ( FactorSpec, ModelSpec, Normalizations, ) -from skillmodels.params_index import get_params_index -from skillmodels.process_model import ( +from skillmodels.common.params_index import get_params_index +from skillmodels.common.process_model import ( get_dimensions, get_has_endogenous_factors, process_model, diff --git a/src/skillmodels/utils_plotting.py b/src/skillmodels/common/utils_plotting.py similarity index 100% rename from src/skillmodels/utils_plotting.py rename to src/skillmodels/common/utils_plotting.py diff --git a/src/skillmodels/variance_decomposition.py b/src/skillmodels/common/variance_decomposition.py similarity index 84% rename from src/skillmodels/variance_decomposition.py rename to src/skillmodels/common/variance_decomposition.py index e92d055e..019fb6ea 100644 --- a/src/skillmodels/variance_decomposition.py +++ b/src/skillmodels/common/variance_decomposition.py @@ -6,18 +6,26 @@ """ from collections.abc import Mapping +from typing import TYPE_CHECKING import pandas as pd -from skillmodels.filtered_states import get_filtered_states -from skillmodels.model_spec import ModelSpec -from skillmodels.process_model import process_model +from skillmodels.chs.filtered_states import get_filtered_states +from skillmodels.common.model_spec import ModelSpec +from skillmodels.common.process_model import process_model + +if TYPE_CHECKING: + from skillmodels.af.types import AFEstimationResult + from skillmodels.amn.types import AMNEstimationResult def decompose_measurement_variance( model_spec: ModelSpec, params: pd.DataFrame, data: pd.DataFrame, + *, + af_result: AFEstimationResult | None = None, + amn_result: AMNEstimationResult | None = None, ) -> pd.DataFrame: """Decompose measurement variance into signal and noise components. @@ -35,6 +43,10 @@ def decompose_measurement_variance( model_spec: The model specification. params: DataFrame with estimated model parameters. data: Empirical dataset used to estimate the model. + af_result: Optional AF estimation result; routes the filtered + states through the AF posterior path. + amn_result: Optional AMN estimation result; routes through the + AMN mixture-Schur posterior path. Returns: DataFrame indexed by (period, measurement, factor) with columns: @@ -51,11 +63,20 @@ def decompose_measurement_variance( 78(3), 883-931. https://doi.org/10.3982/ECTA6551 """ - # Get filtered states to compute factor variances + # Get filtered states to compute factor variances. CHS produces both + # anchored and unanchored states; AF / AMN produce unanchored only, + # so we fall back to unanchored states in that case. 
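+    # The returned dict has the shape (keys only)
+    #   {"anchored_states": {"states": ..., "state_ranges": ...},
+    #    "unanchored_states": {"states": ..., "state_ranges": ...}}
+    # with "anchored_states" present for CHS only.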
filtered_result = get_filtered_states( - model_spec=model_spec, data=data, params=params + model_spec=model_spec, + data=data, + params=params, + af_result=af_result, + amn_result=amn_result, + ) + states_root = filtered_result.get( + "anchored_states", filtered_result["unanchored_states"] ) - filtered_states = filtered_result["anchored_states"]["states"] + filtered_states = states_root["states"] processed_model = process_model(model_spec) return _compute_variance_decomposition( diff --git a/src/skillmodels/visualize_factor_distributions.py b/src/skillmodels/common/visualize_factor_distributions.py similarity index 91% rename from src/skillmodels/visualize_factor_distributions.py rename to src/skillmodels/common/visualize_factor_distributions.py index c6ca7a9b..5a39c2f4 100644 --- a/src/skillmodels/visualize_factor_distributions.py +++ b/src/skillmodels/common/visualize_factor_distributions.py @@ -3,7 +3,7 @@ import warnings from collections.abc import Mapping from copy import deepcopy -from typing import Any +from typing import TYPE_CHECKING, Any import numpy as np import pandas as pd @@ -14,11 +14,34 @@ from plotly.subplots import make_subplots from scipy.stats import gaussian_kde -from skillmodels.filtered_states import get_filtered_states -from skillmodels.model_spec import ModelSpec -from skillmodels.process_model import process_model -from skillmodels.types import ProcessedModel -from skillmodels.utils_plotting import get_layout_kwargs, get_make_subplot_kwargs +from skillmodels.chs.filtered_states import get_filtered_states +from skillmodels.common.model_spec import ModelSpec +from skillmodels.common.process_model import process_model +from skillmodels.common.types import ProcessedModel +from skillmodels.common.utils_plotting import get_layout_kwargs, get_make_subplot_kwargs + +if TYPE_CHECKING: + from skillmodels.af.types import AFEstimationResult + from skillmodels.amn.types import AMNEstimationResult + + +def _filtered_states_for_viz( + model_spec: ModelSpec, + data: pd.DataFrame, + params: pd.DataFrame, + af_result: AFEstimationResult | None, + amn_result: AMNEstimationResult | None, +) -> pd.DataFrame: + """Dispatch through `get_filtered_states`; prefer anchored states when available.""" + out = get_filtered_states( + model_spec=model_spec, + data=data, + params=params, + af_result=af_result, + amn_result=amn_result, + ) + root = out.get("anchored_states", out["unanchored_states"]) + return root["states"] def combine_distribution_plots( @@ -168,6 +191,8 @@ def univariate_densities( *, observed_factors: bool = False, states: pd.DataFrame | dict[str, pd.DataFrame] | list[pd.DataFrame] | None = None, + af_result: AFEstimationResult | None = None, + amn_result: AMNEstimationResult | None = None, show_curve: bool = True, show_hist: bool = False, show_rug: bool = False, @@ -193,6 +218,10 @@ def univariate_densities( states: Filtered or simulated states. Can be a single DataFrame, a list, or a dictionary of DataFrames. If None, retrieve filtered states using model and data. Used to estimate state ranges and factor distributions. + af_result: Optional AF estimation result; routes the internal + filtered-states call through the AF posterior path. + amn_result: Optional AMN estimation result; routes through the + AMN mixture-Schur posterior path. show_hist: Add histogram to the distplot. show_curve: Add density curve to the distplot. show_rug: Add rug to the distplot. 
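The `af_result` / `amn_result` keywords added to this function (and, in the hunks below, to the other plotting helpers) only change where the filtered states come from. A minimal usage sketch, assuming a fitted AF result `af_res` as returned by `estimate_af` elsewhere in this patch; `model_spec`, `data`, and `params` are assumed to be the leading parameters, since only the keyword-only tail of the signature is visible in this hunk:

```python
# Sketch only: route the internal get_filtered_states call through the AF
# posterior instead of the default CHS path by passing the AF fit.
plots = univariate_densities(
    model_spec=model_spec,
    data=data,
    params=af_res.all_params,  # combined params frame attached to the AF result
    af_result=af_res,          # or amn_result=...; leave both unset for CHS
)
```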
@@ -216,9 +245,9 @@ def univariate_densities( """ if states is None: - states = get_filtered_states(model_spec=model_spec, data=data, params=params)[ - "anchored_states" - ]["states"] + states = _filtered_states_for_viz( + model_spec, data, params, af_result, amn_result + ) processed_model = process_model(model_spec) factors = _get_factors( model=processed_model, @@ -275,6 +304,8 @@ def bivariate_density_contours( *, observed_factors: bool = False, states: pd.DataFrame | dict[str, pd.DataFrame] | list[pd.DataFrame] | None = None, + af_result: AFEstimationResult | None = None, + amn_result: AMNEstimationResult | None = None, n_points: int = 50, contour_kwargs: dict[str, Any] | None = None, layout_kwargs: dict[str, Any] | None = None, @@ -300,6 +331,10 @@ def bivariate_density_contours( states: Filtered or simulated states. Can be a single DataFrame, a list, or a dictionary of DataFrames. If None, retrieve filtered states using model and data. Used to estimate state ranges and factor distributions. + af_result: Optional AF estimation result; routes the internal + filtered-states call through the AF posterior path. + amn_result: Optional AMN estimation result; routes through the + AMN mixture-Schur posterior path. n_points: Number of grid points used to create the mesh for calculation of kernel densities. contour_kwargs: Keyword arguments to set contour line properties @@ -327,9 +362,9 @@ def bivariate_density_contours( """ if states is None: - states = get_filtered_states(model_spec=model_spec, data=data, params=params)[ - "anchored_states" - ]["states"] + states = _filtered_states_for_viz( + model_spec, data, params, af_result, amn_result + ) processed_model = process_model(model_spec) factors = _get_factors( model=processed_model, @@ -401,6 +436,8 @@ def bivariate_density_surfaces( *, observed_factors: bool = False, states: pd.DataFrame | None = None, + af_result: AFEstimationResult | None = None, + amn_result: AMNEstimationResult | None = None, n_points: int = 50, layout_kwargs: dict[str, Any] | None = None, colorscale: str = "RdBu_r", @@ -426,6 +463,10 @@ def bivariate_density_surfaces( states: Filtered or simulated states as a single DataFrame. If None, retrieve filtered states using model and data. Used to estimate state ranges and factor distributions. + af_result: Optional AF estimation result; routes the internal + filtered-states call through the AF posterior path. + amn_result: Optional AMN estimation result; routes through the + AMN mixture-Schur posterior path. n_points: Number of grid points used to create the mesh for calculation of kernel densities. 
@@ -449,9 +490,9 @@ def bivariate_density_surfaces( """ if states is None: - states = get_filtered_states(model_spec=model_spec, data=data, params=params)[ - "anchored_states" - ]["states"] + states = _filtered_states_for_viz( + model_spec, data, params, af_result, amn_result + ) elif not isinstance(states, pd.DataFrame): raise ValueError("3d plots are only supported if states is a DataFrame") processed_model = process_model(model_spec) diff --git a/src/skillmodels/visualize_transition_equations.py b/src/skillmodels/common/visualize_transition_equations.py similarity index 95% rename from src/skillmodels/visualize_transition_equations.py rename to src/skillmodels/common/visualize_transition_equations.py index e74e51b0..a3b97a02 100644 --- a/src/skillmodels/visualize_transition_equations.py +++ b/src/skillmodels/common/visualize_transition_equations.py @@ -3,7 +3,7 @@ import itertools from collections.abc import Callable, Mapping, Sequence from copy import deepcopy -from typing import Any, Literal +from typing import TYPE_CHECKING, Any, Literal import jax.numpy as jnp import numpy as np @@ -13,15 +13,19 @@ from plotly import graph_objects as go from plotly.subplots import make_subplots -from skillmodels.filtered_states import get_filtered_states -from skillmodels.model_spec import ModelSpec -from skillmodels.params_index import get_params_index -from skillmodels.parse_params import create_parsing_info, parse_params -from skillmodels.process_data import process_data -from skillmodels.process_debug_data import create_state_ranges -from skillmodels.process_model import process_model -from skillmodels.types import ParsedParams, ProcessedModel -from skillmodels.utils_plotting import get_layout_kwargs, get_make_subplot_kwargs +from skillmodels.chs.filtered_states import get_filtered_states +from skillmodels.chs.process_debug_data import create_state_ranges +from skillmodels.common.model_spec import ModelSpec +from skillmodels.common.params_index import get_params_index +from skillmodels.common.parse_params import create_parsing_info, parse_params +from skillmodels.common.process_data import process_data +from skillmodels.common.process_model import process_model +from skillmodels.common.types import ParsedParams, ProcessedModel +from skillmodels.common.utils_plotting import get_layout_kwargs, get_make_subplot_kwargs + +if TYPE_CHECKING: + from skillmodels.af.types import AFEstimationResult + from skillmodels.amn.types import AMNEstimationResult def combine_transition_plots( @@ -160,6 +164,8 @@ def get_transition_plots( # noqa: C901, PLR0912 layout_kwargs: dict[str, Any] | None = None, *, states: pd.DataFrame | None = None, + af_result: AFEstimationResult | None = None, + amn_result: AMNEstimationResult | None = None, include_correction_factors: bool = False, ) -> dict[tuple[str, str], go.Figure]: """Get dictionary with individual plots of transition equations for each factor. @@ -198,6 +204,10 @@ def get_transition_plots( # noqa: C901, PLR0912 defined in the function will be used. states: Pre-computed filtered states DataFrame (with a `period` column). If provided, skip the internal `get_filtered_states` call. + af_result: Optional AF estimation result; routes the internal + filtered-states call through the AF posterior path. + amn_result: Optional AMN estimation result; routes through the + AMN mixture-Schur posterior path. include_correction_factors: Whether to include correction factors in the plots. Default False. 
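`get_transition_plots` gains the same routing, so one can go straight from an AF fit to transition plots. A hedged end-to-end sketch: `estimate_af` and `AFEstimationOptions` are called with the keywords that appear in the new tests in this patch, while the leading parameter names of `get_transition_plots` are assumptions (only the keyword-only tail of its signature is visible here):

```python
from skillmodels.af import AFEstimationOptions, estimate_af

# Sketch only: run the sequential AF estimator, then reuse the fit so the
# plotting code pulls filtered states from the AF posterior path.
af_res = estimate_af(
    model_spec=model_spec,
    data=data,
    af_options=AFEstimationOptions(
        n_halton_points=50,
        n_halton_points_shock=20,
        n_mixture_components=1,
        optimizer_algorithm="scipy_lbfgsb",
    ),
)
figures = get_transition_plots(
    model_spec=model_spec,  # assumed parameter name
    params=af_res.all_params,
    data=data,
    af_result=af_res,
)
```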
@@ -255,9 +265,15 @@ def get_transition_plots( # noqa: C901, PLR0912 if data is None: msg = "Either 'data' or 'states' must be provided." raise TypeError(msg) - states = get_filtered_states(model_spec=model_spec, data=data, params=params)[ - "anchored_states" - ]["states"] + filtered = get_filtered_states( + model_spec=model_spec, + data=data, + params=params, + af_result=af_result, + amn_result=amn_result, + ) + states_root = filtered.get("anchored_states", filtered["unanchored_states"]) + states = states_root["states"] states = _normalize_states_columns( states, diff --git a/src/skillmodels/config.py b/src/skillmodels/config.py deleted file mode 100644 index cd7eb32b..00000000 --- a/src/skillmodels/config.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Configuration constants and paths for skillmodels.""" - -from pathlib import Path - -TEST_DATA_DIR = Path(__file__).resolve().parent / "test_data" -REGRESSION_VAULT = ( - Path(__file__).resolve().parent.parent.parent / "tests" / "regression_vault" -) diff --git a/src/skillmodels/test_data/model2.py b/src/skillmodels/test_data/model2.py index dc5a160b..0ac405a0 100644 --- a/src/skillmodels/test_data/model2.py +++ b/src/skillmodels/test_data/model2.py @@ -5,7 +5,7 @@ anchoring of fac1 to outcome Q1 and a single control variable x1. """ -from skillmodels.model_spec import ( +from skillmodels.common.model_spec import ( AnchoringSpec, EstimationOptions, FactorSpec, @@ -53,5 +53,10 @@ robust_bounds=True, bounds_distance=0.001, n_mixtures=1, + # Tests using this fixture run `get_maximization_inputs` for + # shape and value checks rather than full estimation; opt into + # the cheap Spearman start-value path so the fixture stays fast. + # End-user defaults (EstimationOptions()) keep `"amn"`. + start_params_strategy="spearman", ), ) diff --git a/src/skillmodels/test_data/simplest_augmented_model.py b/src/skillmodels/test_data/simplest_augmented_model.py index fd481723..0c632206 100644 --- a/src/skillmodels/test_data/simplest_augmented_model.py +++ b/src/skillmodels/test_data/simplest_augmented_model.py @@ -5,7 +5,7 @@ periods. Used for testing endogenous factor augmentation. """ -from skillmodels.model_spec import ( +from skillmodels.common.model_spec import ( EstimationOptions, FactorSpec, ModelSpec, @@ -35,5 +35,9 @@ observed_factors=("of",), estimation_options=EstimationOptions( bounds_distance=1e-8, + # Tests using this fixture exercise CHS plumbing rather than + # full estimation; opt into the cheap Spearman start-value + # path so collection stays fast. End-user defaults remain "amn". 
+ start_params_strategy="spearman", ), ) diff --git a/tests/conftest.py b/tests/conftest.py index 89d6f081..d3a2043d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -6,7 +6,7 @@ import pandas as pd import pytest -from skillmodels.config import TEST_DATA_DIR +from skillmodels.common.config import TEST_DATA_DIR from skillmodels.test_data.model2 import MODEL2 REGRESSION_VAULT = Path(__file__).parent / "regression_vault" diff --git a/tests/test_af_batching.py b/tests/test_af_batching.py new file mode 100644 index 00000000..18681d5c --- /dev/null +++ b/tests/test_af_batching.py @@ -0,0 +1,139 @@ +"""Tests for the AF memory-aware batching helpers.""" + +import os + +import jax +import jax.numpy as jnp +import numpy as np +import pytest + +from skillmodels.af.batching import ( + _DEFAULT_TARGET_BATCH_BYTES, + _ENV_VAR_TARGET, + auto_n_obs_per_batch, + target_batch_bytes, +) +from skillmodels.af.likelihood import _map_over_obs + +jax.config.update("jax_enable_x64", val=True) + + +def _square_sum(x: jnp.ndarray) -> jnp.ndarray: + return jnp.sum(x**2) + + +def _two_arg(x: jnp.ndarray, y: jnp.ndarray) -> jnp.ndarray: + return jnp.sum(x * y) + + +@pytest.mark.parametrize("batch_size", [None, 1, 3, 7, 100]) +def test_map_over_obs_matches_vmap_for_every_batch_size(batch_size: int | None) -> None: + """The chunked ``_map_over_obs`` must match ``jax.vmap`` exactly.""" + rng = np.random.default_rng(0) + xs = jnp.asarray(rng.normal(size=(20, 5))) + + expected = jax.vmap(_square_sum)(xs) + actual = _map_over_obs(_square_sum, xs, n_obs_per_batch=batch_size) + + # 1 ULP differences are allowed because `lax.map` may use a different + # reduction order than `vmap`. + np.testing.assert_allclose( + np.asarray(actual), np.asarray(expected), rtol=0, atol=1e-13 + ) + + +@pytest.mark.parametrize("batch_size", [None, 1, 5]) +def test_map_over_obs_two_args(batch_size: int | None) -> None: + rng = np.random.default_rng(1) + xs = jnp.asarray(rng.normal(size=(15, 3))) + ys = jnp.asarray(rng.normal(size=(15, 3))) + + expected = jax.vmap(_two_arg)(xs, ys) + actual = _map_over_obs(_two_arg, xs, ys, n_obs_per_batch=batch_size) + + np.testing.assert_allclose( + np.asarray(actual), np.asarray(expected), rtol=0, atol=1e-14 + ) + + +def test_map_over_obs_preserves_gradient() -> None: + """Reverse-mode gradient must not depend on the chunk size.""" + rng = np.random.default_rng(2) + xs = jnp.asarray(rng.normal(size=(12, 4))) + + def _loss(xs_flat: jnp.ndarray, batch: int | None) -> jnp.ndarray: + xs_r = xs_flat.reshape((12, 4)) + return jnp.sum(_map_over_obs(_square_sum, xs_r, n_obs_per_batch=batch)) + + g_full = jax.grad(lambda x: _loss(x, None))(xs.reshape(-1)) + g_chunked = jax.grad(lambda x: _loss(x, 3))(xs.reshape(-1)) + + np.testing.assert_allclose( + np.asarray(g_chunked), np.asarray(g_full), rtol=0, atol=1e-10 + ) + + +def test_target_batch_bytes_default() -> None: + os.environ.pop(_ENV_VAR_TARGET, None) + assert target_batch_bytes() == _DEFAULT_TARGET_BATCH_BYTES + + +def test_target_batch_bytes_env_override() -> None: + os.environ[_ENV_VAR_TARGET] = "1048576" + try: + assert target_batch_bytes() == 1_048_576 + finally: + del os.environ[_ENV_VAR_TARGET] + + +def test_target_batch_bytes_rejects_junk() -> None: + os.environ[_ENV_VAR_TARGET] = "not-a-number" + try: + assert target_batch_bytes() == _DEFAULT_TARGET_BATCH_BYTES + finally: + del os.environ[_ENV_VAR_TARGET] + + +def test_auto_n_obs_per_batch_small_problem_uses_all() -> None: + """Tiny problems fit easily; the whole batch should run in one shot.""" + 
batch = auto_n_obs_per_batch( + n_obs=100, + n_halton_points=20, + n_halton_points_shock=10, + n_latent=2, + n_endogenous=0, + ) + assert batch == 100 + + +def test_auto_n_obs_per_batch_large_problem_splits() -> None: + """Large problems need to be chunked; the result is smaller than n_obs.""" + batch = auto_n_obs_per_batch( + n_obs=1403, + n_halton_points=20_000, + n_halton_points_shock=20_000, + n_latent=4, + n_endogenous=1, + ) + assert 1 <= batch < 1403 + + +def test_auto_n_obs_per_batch_respects_target_bytes() -> None: + """A bigger budget should allow a larger batch (monotone in the budget).""" + small = auto_n_obs_per_batch( + n_obs=10_000, + n_halton_points=200, + n_halton_points_shock=50, + n_latent=2, + n_endogenous=1, + target_bytes=2**24, + ) + large = auto_n_obs_per_batch( + n_obs=10_000, + n_halton_points=200, + n_halton_points_shock=50, + n_latent=2, + n_endogenous=1, + target_bytes=2**30, + ) + assert small <= large diff --git a/tests/test_af_equality_propagation.py b/tests/test_af_equality_propagation.py new file mode 100644 index 00000000..f3d83fb1 --- /dev/null +++ b/tests/test_af_equality_propagation.py @@ -0,0 +1,248 @@ +"""Cross-period equality propagation in `estimate_af`. + +skane-struct-bw and similar applications impose equality constraints +across aug-periods (e.g., shock_sds, transition coefficients constant +within a stage, loadings/meas_sds constant across periods). AF's +sequential MLE estimates each period independently and would silently +violate those constraints; the new `constraints=` kwarg on +`estimate_af` propagates equality groups by pinning every member of a +group to whichever member is estimated first. + +These tests exercise the propagation directly via the helpers and +end-to-end via a small synthetic T=3 fit. 
+""" + +import functools + +import jax +import numpy as np +import optimagic as om +import pandas as pd +import pytest + +from skillmodels.af import AFEstimationOptions, estimate_af +from skillmodels.af.estimate import ( + _extract_equality_groups, + _propagate_equality_groups, +) +from skillmodels.af.types import AFPeriodResult +from skillmodels.common.constraints import select_by_loc +from skillmodels.common.model_spec import ( + EstimationOptions, + FactorSpec, + ModelSpec, + Normalizations, +) +from skillmodels.common.params_index import get_params_index +from skillmodels.common.process_model import process_model + +jax.config.update("jax_enable_x64", True) + + +def _equality_constraint(loc: pd.MultiIndex) -> om.EqualityConstraint: + return om.EqualityConstraint( + selector=functools.partial(select_by_loc, loc=loc), + ) + + +def test_extract_equality_groups_returns_only_equality_constraints() -> None: + loc = pd.MultiIndex.from_tuples( + [("transition", 0, "fac1", "fac1"), ("transition", 1, "fac1", "fac1")], + names=["category", "period", "name1", "name2"], + ) + constraints: list[om.constraints.Constraint] = [ + _equality_constraint(loc), + om.FixedConstraint(selector=functools.partial(select_by_loc, loc=loc)), + ] + groups = _extract_equality_groups(constraints) + assert len(groups) == 1 + assert groups[0].equals(loc) + + +def test_extract_equality_groups_handles_empty_input() -> None: + assert _extract_equality_groups(None) == [] + assert _extract_equality_groups([]) == [] + + +def test_propagate_equality_groups_pins_other_periods() -> None: + period_0 = AFPeriodResult( + period=0, + params=pd.DataFrame( + {"value": [0.42]}, + index=pd.MultiIndex.from_tuples( + [("shock_sds", 0, "skills", "-")], + names=["category", "period", "name1", "name2"], + ), + ), + loglikelihood=-1.0, + success=True, + optimize_result=None, + ) + group = pd.MultiIndex.from_tuples( + [ + ("shock_sds", 0, "skills", "-"), + ("shock_sds", 1, "skills", "-"), + ("shock_sds", 2, "skills", "-"), + ], + names=["category", "period", "name1", "name2"], + ) + fixed_params = _propagate_equality_groups( + period_results=[period_0], + fixed_params=None, + equality_groups=[group], + ) + assert fixed_params is not None + assert ("shock_sds", 1, "skills", "-") in fixed_params.index + assert ("shock_sds", 2, "skills", "-") in fixed_params.index + assert fixed_params.loc[("shock_sds", 1, "skills", "-"), "value"] == 0.42 + assert fixed_params.loc[("shock_sds", 2, "skills", "-"), "value"] == 0.42 + + +def test_propagate_equality_groups_respects_existing_pins() -> None: + period_0 = AFPeriodResult( + period=0, + params=pd.DataFrame( + {"value": [0.42]}, + index=pd.MultiIndex.from_tuples( + [("shock_sds", 0, "skills", "-")], + names=["category", "period", "name1", "name2"], + ), + ), + loglikelihood=-1.0, + success=True, + optimize_result=None, + ) + fixed_params_initial = pd.DataFrame( + {"value": [0.99]}, + index=pd.MultiIndex.from_tuples( + [("shock_sds", 1, "skills", "-")], + names=["category", "period", "name1", "name2"], + ), + ) + group = pd.MultiIndex.from_tuples( + [("shock_sds", 0, "skills", "-"), ("shock_sds", 1, "skills", "-")], + names=["category", "period", "name1", "name2"], + ) + out = _propagate_equality_groups( + period_results=[period_0], + fixed_params=fixed_params_initial, + equality_groups=[group], + ) + assert out is not None + assert out.loc[("shock_sds", 1, "skills", "-"), "value"] == 0.99 + + +def _build_t3_model() -> ModelSpec: + return ModelSpec( + factors={ + "state": FactorSpec( + measurements=(("y1", 
"y2", "y3"),) * 3, + normalizations=Normalizations( + loadings=({"y1": 1},) * 3, + intercepts=({"y1": 0},) * 3, + ), + transition_function="linear", + ), + }, + estimation_options=EstimationOptions( + robust_bounds=True, + bounds_distance=0.001, + n_mixtures=1, + ), + ) + + +def _truth_params(model: ModelSpec) -> pd.DataFrame: + processed = process_model(model) + p_index = get_params_index( + update_info=processed.update_info, + labels=processed.labels, + dimensions=processed.dimensions, + transition_info=processed.transition_info, + endogenous_factors_info=processed.endogenous_factors_info, + ) + df = pd.DataFrame({"value": np.zeros(len(p_index))}, index=p_index) + cat = df.index.get_level_values("category") + df.loc[cat == "loadings", "value"] = 1.0 + df.loc[cat == "meas_sds", "value"] = 0.3 + df.loc[cat == "shock_sds", "value"] = 0.4 + df.loc[cat == "mixture_weights", "value"] = 1.0 + for aug in range(2): + df.loc[("transition", aug, "state", "state"), "value"] = 0.8 + df.loc[("transition", aug, "state", "constant"), "value"] = 0.0 + diag_mask = pd.Series( + [ + idx[0] == "initial_cholcovs" + and "-" in idx[3] + and idx[3].split("-")[0] == idx[3].split("-")[1] + for idx in df.index + ], + index=df.index, + ) + df.loc[diag_mask, "value"] = 1.0 + return df + + +def _simulate_t3( + model: ModelSpec, params: pd.DataFrame, n_obs: int, seed: int +) -> pd.DataFrame: + rng = np.random.default_rng(seed) + states: list[np.ndarray] = [rng.normal(0.0, 1.0, size=n_obs)] + + def _val(loc: tuple) -> float: + return float(params.loc[loc, "value"]) + + for t in range(1, 3): + a = _val(("transition", t - 1, "state", "state")) + c = _val(("transition", t - 1, "state", "constant")) + sigma = _val(("shock_sds", t - 1, "state", "-")) + states.append(a * states[-1] + c + sigma * rng.normal(size=n_obs)) + rows: list[dict] = [] + for obs_id in range(n_obs): + for t in range(3): + row: dict[str, float | int] = {"caseid": obs_id, "period": t} + for k in (1, 2, 3): + meas = f"y{k}" + lam = _val(("loadings", t, meas, "state")) + eps = _val(("meas_sds", t, meas, "-")) + row[meas] = lam * states[t][obs_id] + eps * rng.normal() + rows.append(row) + return pd.DataFrame.from_records(rows).set_index(["caseid", "period"]) + + +@pytest.mark.end_to_end +def test_estimate_af_enforces_equality_across_periods() -> None: + """Pinning shock_sds equal across periods makes the chain return one value.""" + model = _build_t3_model() + params = _truth_params(model) + data = _simulate_t3(model, params, n_obs=300, seed=20260510) + + af_options = AFEstimationOptions( + n_halton_points=20, + n_halton_points_shock=10, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ) + + eq_loc = pd.MultiIndex.from_tuples( + [ + ("shock_sds", 0, "state", "-"), + ("shock_sds", 1, "state", "-"), + ], + names=["category", "period", "name1", "name2"], + ) + constraints: list[om.constraints.Constraint] = [_equality_constraint(eq_loc)] + + result = estimate_af( + model_spec=model, + data=data, + af_options=af_options, + constraints=constraints, + ) + + def _val(period_idx: int, loc: tuple) -> float: + return float(result.period_results[period_idx].params.loc[loc, "value"]) + + period1_sd = _val(1, ("shock_sds", 0, "state", "-")) + period2_sd = _val(2, ("shock_sds", 1, "state", "-")) + assert period1_sd == pytest.approx(period2_sd, rel=1e-9) diff --git a/tests/test_af_estimate.py b/tests/test_af_estimate.py new file mode 100644 index 00000000..f2693aa0 --- /dev/null +++ b/tests/test_af_estimate.py @@ -0,0 +1,2291 @@ +"""End-to-end tests for the 
AF estimator. + +Run AF estimation on MODEL2 test data and verify it produces reasonable +results, comparing to the CHS Kalman filter estimates where applicable. +""" + +from collections.abc import Callable +from pathlib import Path + +import jax +import jax.numpy as jnp +import numpy as np +import optimagic as om +import pandas as pd +import pytest + +from skillmodels.af import AFEstimationOptions, estimate_af +from skillmodels.af.likelihood import _rebuild_chain_at_period, af_loglike_transition +from skillmodels.af.types import ChainLink +from skillmodels.chs.filtered_states import get_filtered_states +from skillmodels.chs.maximization_inputs import get_maximization_inputs +from skillmodels.common.config import TEST_DATA_DIR +from skillmodels.common.decorators import register_params +from skillmodels.common.model_spec import ( + EstimationOptions, + FactorSpec, + ModelSpec, + Normalizations, +) + +jax.config.update("jax_enable_x64", True) + +REGRESSION_VAULT = Path(__file__).parent / "regression_vault" + + +@pytest.fixture +def model2_data(): + """Load the MODEL2 simulated dataset.""" + data = pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta") + return data.set_index(["caseid", "period"]) + + +@pytest.fixture +def model2_af(): + """Create an AF-compatible 2-factor model from MODEL2. + + Use fac1 (log_ces, 3 measures) and fac2 (linear, 3 measures). + Drop fac3 since it has measurements only in period 0. + Reduce to 3 periods for faster testing. + """ + return ModelSpec( + factors={ + "fac1": FactorSpec( + measurements=(("y1", "y2", "y3"),) * 3, + normalizations=Normalizations( + loadings=({"y1": 1},) * 3, + intercepts=({"y1": 0},) * 3, + ), + transition_function="log_ces", + ), + "fac2": FactorSpec( + measurements=(("y4", "y5", "y6"),) * 3, + normalizations=Normalizations( + loadings=({"y4": 1},) * 3, + intercepts=({"y4": 0},) * 3, + ), + transition_function="linear", + ), + }, + controls=("x1",), + estimation_options=EstimationOptions( + robust_bounds=True, + bounds_distance=0.001, + n_mixtures=1, + ), + ) + + +@pytest.fixture +def chs_params(): + """Load CHS-estimated parameters from regression vault.""" + params = pd.read_csv(REGRESSION_VAULT / "one_stage_anchoring.csv") + return params.set_index(["category", "period", "name1", "name2"]) + + +@pytest.mark.end_to_end +def test_af_estimate_runs_on_model2(model2_af, model2_data) -> None: + """Verify AF estimation runs to completion on MODEL2 data.""" + af_options = AFEstimationOptions( + n_halton_points=20, + n_halton_points_shock=10, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ) + + result = estimate_af( + model_spec=model2_af, + data=model2_data, + af_options=af_options, + ) + + # Basic checks + assert len(result.period_results) == 3 + assert result.all_params is not None + assert len(result.all_params) > 0 + + # Check each period converged (or at least produced finite likelihood) + for pr in result.period_results: + assert np.isfinite(pr.loglikelihood), ( + f"Period {pr.period}: non-finite log-likelihood {pr.loglikelihood}" + ) + + +@pytest.mark.end_to_end +def test_af_measurement_params_in_ballpark( + model2_af, + model2_data, + chs_params, +) -> None: + """Verify AF measurement parameter estimates are in the same ballpark as CHS. + + The two estimators use different methods, so exact agreement is not + expected. But measurement loadings and SDs should be roughly similar. 
+ """ + af_options = AFEstimationOptions( + n_halton_points=30, + n_halton_points_shock=15, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ) + + result = estimate_af( + model_spec=model2_af, + data=model2_data, + af_options=af_options, + ) + + # Compare period 0 measurement SDs + af_meas_sds = result.all_params.query("category == 'meas_sds' and period == 0") + if len(af_meas_sds) > 0: + af_sd_values = af_meas_sds["value"].to_numpy() + # All SDs should be positive and not too extreme + assert (af_sd_values > 0).all(), "All measurement SDs should be positive" + assert (af_sd_values < 10).all(), ( + "Measurement SDs should not be unreasonably large" + ) + + +@pytest.mark.end_to_end +def test_af_estimate_single_factor() -> None: + """Test AF estimation with a single-factor model (simplest case).""" + # Create minimal model: 1 factor, 3 measures, 2 periods + model = ModelSpec( + factors={ + "skill": FactorSpec( + measurements=(("m1", "m2", "m3"),) * 2, + normalizations=Normalizations( + loadings=({"m1": 1},) * 2, + intercepts=({"m1": 0},) * 2, + ), + transition_function="linear", + ), + }, + estimation_options=EstimationOptions( + robust_bounds=True, + bounds_distance=0.001, + n_mixtures=1, + ), + ) + + # Generate simple synthetic data + rng = np.random.default_rng(42) + n_obs = 200 + n_periods = 2 + + # True latent factor + theta = rng.normal(0, 1, n_obs) + + rows = [] + for i in range(n_obs): + for t in range(n_periods): + row = { + "caseid": i, + "period": t, + "m1": theta[i] + rng.normal(0, 0.3), + "m2": 0.5 + 0.8 * theta[i] + rng.normal(0, 0.4), + "m3": -0.2 + 1.2 * theta[i] + rng.normal(0, 0.35), + } + rows.append(row) + + data = pd.DataFrame(rows).set_index(["caseid", "period"]) + + af_options = AFEstimationOptions( + n_halton_points=25, + n_halton_points_shock=10, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ) + + result = estimate_af(model_spec=model, data=data, af_options=af_options) + + assert len(result.period_results) == 2 + assert np.isfinite(result.period_results[0].loglikelihood) + + # Check that estimated loadings are roughly in the right direction + af_loadings = result.all_params.query("category == 'loadings' and period == 0") + if len(af_loadings) > 0: + # m1 loading on skill should be fixed at 1.0 + # m2 loading should be roughly 0.8 + # m3 loading should be roughly 1.2 + for _, row in af_loadings.iterrows(): + assert np.isfinite(row["value"]), "Loadings should be finite" + + +@pytest.mark.end_to_end +def test_af_vs_chs_measurement_params_agree() -> None: + """Verify AF and CHS produce similar measurement parameter estimates. + + Simulate data from a known single-factor model and estimate with both + AF and CHS. Period-0 measurement loadings, intercepts, and error SDs + should agree within tolerance. 
+ """ + rng = np.random.default_rng(42) + n_obs = 500 + n_periods = 2 + + # True DGP parameters + true_loadings = {"m1": 1.0, "m2": 0.8, "m3": 1.2} + true_intercepts = {"m1": 0.0, "m2": 0.5, "m3": -0.2} + true_meas_sds = {"m1": 0.3, "m2": 0.4, "m3": 0.35} + + theta = rng.normal(0, 1, n_obs) + rows = [] + for i in range(n_obs): + for t in range(n_periods): + rows.append( + { + "caseid": i, + "period": t, + "m1": true_intercepts["m1"] + + true_loadings["m1"] * theta[i] + + rng.normal(0, true_meas_sds["m1"]), + "m2": true_intercepts["m2"] + + true_loadings["m2"] * theta[i] + + rng.normal(0, true_meas_sds["m2"]), + "m3": true_intercepts["m3"] + + true_loadings["m3"] * theta[i] + + rng.normal(0, true_meas_sds["m3"]), + } + ) + data = pd.DataFrame(rows).set_index(["caseid", "period"]) + + model = ModelSpec( + factors={ + "skill": FactorSpec( + measurements=(("m1", "m2", "m3"),) * n_periods, + normalizations=Normalizations( + loadings=({"m1": 1},) * n_periods, + intercepts=({"m1": 0},) * n_periods, + ), + transition_function="linear", + ), + }, + estimation_options=EstimationOptions( + robust_bounds=True, + bounds_distance=0.001, + n_mixtures=1, + ), + ) + + # --- AF estimation --- + af_result = estimate_af( + model_spec=model, + data=data, + af_options=AFEstimationOptions( + n_halton_points=50, + n_halton_points_shock=20, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ), + ) + af_p0 = af_result.period_results[0].params + + # --- CHS estimation (naive start: all free params = 0.1) --- + chs_est = _run_chs_estimation(model, data) + + # --- Compare period-0 measurement parameters --- + tol = 0.15 # generous tolerance for finite-sample differences + + for meas in ("m2", "m3"): + af_load = float( + af_p0.loc[("loadings", 0, meas, "skill"), "value"] # ty: ignore[invalid-argument-type] + ) + chs_load = float( + chs_est.loc[("loadings", 0, meas, "skill"), "value"] # ty: ignore[invalid-argument-type] + ) + assert abs(af_load - chs_load) < tol, ( + f"loading({meas}): AF={af_load:.4f} vs CHS={chs_load:.4f}" + ) + + af_intercept = float( + af_p0.loc[("controls", 0, meas, "constant"), "value"] # ty: ignore[invalid-argument-type] + ) + chs_intercept = float( + chs_est.loc[("controls", 0, meas, "constant"), "value"] # ty: ignore[invalid-argument-type] + ) + assert abs(af_intercept - chs_intercept) < tol, ( + f"intercept({meas}): AF={af_intercept:.4f} vs CHS={chs_intercept:.4f}" + ) + + for meas in ("m1", "m2", "m3"): + af_sd = float( + af_p0.loc[("meas_sds", 0, meas, "-"), "value"] # ty: ignore[invalid-argument-type] + ) + chs_sd = float( + chs_est.loc[("meas_sds", 0, meas, "-"), "value"] # ty: ignore[invalid-argument-type] + ) + assert abs(af_sd - chs_sd) < tol, ( + f"meas_sd({meas}): AF={af_sd:.4f} vs CHS={chs_sd:.4f}" + ) + + +# --------------------------------------------------------------------------- +# TDD tests for transition likelihood and parameter recovery +# --------------------------------------------------------------------------- + + +def _simulate_linear_transition_data( + *, + n_obs: int = 500, + n_periods: int = 3, + true_beta: float = 0.8, + true_constant: float = 0.1, + true_shock_sd: float = 0.3, + true_meas_sds: tuple[float, ...] = (0.3, 0.4, 0.35), + true_loadings: tuple[float, ...] = (1.0, 0.8, 1.2), + true_intercepts: tuple[float, ...] = (0.0, 0.5, -0.2), + seed: int = 42, +) -> tuple[pd.DataFrame, dict[str, float]]: + """Simulate panel data from a single-factor linear transition model. + + DGP: theta_{t+1} = constant + beta * theta_t + N(0, shock_sd^2). 
+ Measurements: Z_{t,m} = intercept_m + loading_m * theta_t + noise. + + Return tuple of (DataFrame indexed by (caseid, period), dict of true params). + """ + rng = np.random.default_rng(seed) + theta = np.zeros((n_obs, n_periods)) + theta[:, 0] = rng.normal(0, 1, n_obs) + for t in range(n_periods - 1): + theta[:, t + 1] = ( + true_constant + + true_beta * theta[:, t] + + rng.normal(0, true_shock_sd, n_obs) + ) + + rows = [] + for i in range(n_obs): + for t in range(n_periods): + row = {"caseid": i, "period": t} + for m_idx, meas_name in enumerate(("m1", "m2", "m3")): + row[meas_name] = ( + true_intercepts[m_idx] + + true_loadings[m_idx] * theta[i, t] + + rng.normal(0, true_meas_sds[m_idx]) + ) + rows.append(row) + + data = pd.DataFrame(rows).set_index(["caseid", "period"]) + true_params = { + "beta": true_beta, + "constant": true_constant, + "shock_sd": true_shock_sd, + } + return data, true_params + + +def _make_linear_transition_model(n_periods: int = 3) -> ModelSpec: + """Create a single-factor linear transition model for testing.""" + return ModelSpec( + factors={ + "skill": FactorSpec( + measurements=(("m1", "m2", "m3"),) * n_periods, + normalizations=Normalizations( + loadings=({"m1": 1},) * n_periods, + intercepts=({"m1": 0},) * n_periods, + ), + transition_function="linear", + ), + }, + estimation_options=EstimationOptions( + robust_bounds=True, + bounds_distance=0.001, + n_mixtures=1, + ), + ) + + +@pytest.mark.end_to_end +def test_af_transition_params_affect_likelihood() -> None: + """Verify that the transition likelihood depends on transition parameters. + + If we run AF estimation with the transition function wired in correctly, + the estimated transition parameters should NOT be at their initial values. + The likelihood should be sensitive to transition parameter changes. + """ + data, _true_params = _simulate_linear_transition_data(n_obs=300, n_periods=3) + model = _make_linear_transition_model(n_periods=3) + + af_opts = AFEstimationOptions( + n_halton_points=30, + n_halton_points_shock=15, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ) + result = estimate_af(model_spec=model, data=data, af_options=af_opts) + + # Period 1 result should have transition params + p1 = result.period_results[1].params + trans_params = p1.query("category == 'transition'") + assert len(trans_params) > 0, "Should have transition parameters in period 1" + + # The transition params should NOT all be at their initialization value (0.1). + # If the transition function is actually used in the likelihood, the optimizer + # will move them away from 0.5 toward the true values. + trans_values = trans_params["value"].to_numpy() + init_values = np.full_like(trans_values, 0.5) + assert not np.allclose(trans_values, init_values, atol=0.01), ( + f"Transition params stuck at init values: {trans_values}. " + "The transition function is not being used in the likelihood." + ) + + +@pytest.mark.end_to_end +def test_af_recovers_linear_transition_params() -> None: + """Verify AF recovers known linear transition parameters from synthetic data. + + Simulate data with theta_{t+1} = 0.1 + 0.8 * theta_t + N(0, 0.3^2), + estimate with AF, and check that estimated beta and constant are close + to true values. 
+ """ + data, true_params = _simulate_linear_transition_data(n_obs=500, n_periods=3) + model = _make_linear_transition_model(n_periods=3) + + af_opts = AFEstimationOptions( + n_halton_points=800, + n_halton_points_shock=20, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ) + result = estimate_af(model_spec=model, data=data, af_options=af_opts) + + # Extract estimated transition params from period 1 (transition 0->1) + p1 = result.period_results[1].params + + # For a linear transition with 1 factor "skill", params are: + # ("transition", 0, "skill", "skill") = beta + # ("transition", 0, "skill", "constant") = constant + est_beta = float( + p1.loc[("transition", 0, "skill", "skill"), "value"] # ty: ignore[invalid-argument-type] + ) + est_constant = float( + p1.loc[("transition", 0, "skill", "constant"), "value"] # ty: ignore[invalid-argument-type] + ) + + # Also check shock SD + est_shock_sd = float( + p1.loc[("shock_sds", 0, "skill", "-"), "value"] # ty: ignore[invalid-argument-type] + ) + + tol = 0.25 # generous tolerance for quadrature-based estimation + assert abs(est_beta - true_params["beta"]) < tol, ( + f"beta: estimated={est_beta:.4f}, true={true_params['beta']}" + ) + assert abs(est_constant - true_params["constant"]) < tol, ( + f"constant: estimated={est_constant:.4f}, true={true_params['constant']}" + ) + assert abs(est_shock_sd - true_params["shock_sd"]) < tol, ( + f"shock_sd: estimated={est_shock_sd:.4f}, true={true_params['shock_sd']}" + ) + + +@pytest.mark.end_to_end +def test_af_vs_chs_transition_params_agree() -> None: + """Verify AF and CHS transition parameter estimates are in the same ballpark. + + Use the same synthetic DGP as the measurement params comparison test, + but now compare the transition parameters estimated by both methods. 
+ """ + data, _true_params = _simulate_linear_transition_data(n_obs=500, n_periods=3) + model = _make_linear_transition_model(n_periods=3) + + # --- AF estimation --- + af_result = estimate_af( + model_spec=model, + data=data, + af_options=AFEstimationOptions( + n_halton_points=40, + n_halton_points_shock=20, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ), + ) + + # --- CHS estimation (naive start: all free params = 0.1) --- + chs_est = _run_chs_estimation(model, data) + + # --- Compare transition parameters --- + af_p1 = af_result.period_results[1].params + + af_beta = float( + af_p1.loc[("transition", 0, "skill", "skill"), "value"] # ty: ignore[invalid-argument-type] + ) + af_constant = float( + af_p1.loc[("transition", 0, "skill", "constant"), "value"] # ty: ignore[invalid-argument-type] + ) + af_shock = float( + af_p1.loc[("shock_sds", 0, "skill", "-"), "value"] # ty: ignore[invalid-argument-type] + ) + + chs_beta = float( + chs_est.loc[("transition", 0, "skill", "skill"), "value"] # ty: ignore[invalid-argument-type] + ) + chs_constant = float( + chs_est.loc[("transition", 0, "skill", "constant"), "value"] # ty: ignore[invalid-argument-type] + ) + chs_shock = float( + chs_est.loc[("shock_sds", 0, "skill", "-"), "value"] # ty: ignore[invalid-argument-type] + ) + + tol = 0.3 # generous: different methods, different # periods used + assert abs(af_beta - chs_beta) < tol, ( + f"beta: AF={af_beta:.4f} vs CHS={chs_beta:.4f}" + ) + assert abs(af_constant - chs_constant) < tol, ( + f"constant: AF={af_constant:.4f} vs CHS={chs_constant:.4f}" + ) + assert abs(af_shock - chs_shock) < tol, ( + f"shock_sd: AF={af_shock:.4f} vs CHS={chs_shock:.4f}" + ) + + +def _run_chs_estimation( + model: ModelSpec, + data: pd.DataFrame, +) -> pd.DataFrame: + """Run CHS estimation with uninformed but feasible start values. + + Use generic defaults that don't favour either estimator: loadings = 1, + controls = 0, SDs = 0.5, transition = 0.5, initial_states = 0. + Probability constraints are satisfied (equal shares). + """ + max_inputs = get_maximization_inputs(model, data) + params = max_inputs["params_template"].copy() + free = params["lower_bound"] != params["upper_bound"] + cat = params.index.get_level_values("category") + params.loc[free, "value"] = 0.5 + params.loc[free & (cat == "loadings"), "value"] = 1.0 + params.loc[free & (cat == "controls"), "value"] = 0.0 + params.loc[free & (cat == "initial_states"), "value"] = 0.0 + # Probability constraints must be satisfied at start params + for constr in max_inputs["constraints"]: + if isinstance(constr, om.ProbabilityConstraint): + prob_idx = constr.selector(params[["value"]]).index + params.loc[prob_idx, "value"] = 1.0 / len(prob_idx) + + def _neg_ll_and_grad(p: pd.DataFrame) -> tuple[float, np.ndarray]: + val, grad = max_inputs["loglike_and_gradient"](p) + return -float(val), -np.array(grad) + + return om.minimize( + fun=lambda p: -max_inputs["loglike"](p), + params=params[["value"]], + algorithm="scipy_lbfgsb", + bounds=om.Bounds(lower=params["lower_bound"], upper=params["upper_bound"]), + constraints=max_inputs["constraints"], + fun_and_jac=_neg_ll_and_grad, + ).params + + +@pytest.mark.long_running +def test_af_vs_chs_both_estimated_on_model2(model2_af, model2_data) -> None: + """Run both AF and CHS optimisation on MODEL2 data and compare estimates. + + This test actually optimises both estimators (not just loading stored + params), so it takes a while. Skipped in CI via the long_running marker. 
+ """ + chs_est = _run_chs_estimation(model2_af, model2_data) + + # --- AF estimation --- + af_result = estimate_af( + model_spec=model2_af, + data=model2_data, + af_options=AFEstimationOptions( + n_halton_points=60, + n_halton_points_shock=30, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ), + ) + + # --- Compare period-0 measurement params --- + af_p0 = af_result.period_results[0].params + meas_tol = 0.5 # generous: different estimators, AF uses 3 periods + + for meas, fac in [("y2", "fac1"), ("y3", "fac1"), ("y5", "fac2"), ("y6", "fac2")]: + af_val = float( + af_p0.loc[("loadings", 0, meas, fac), "value"] # ty: ignore[invalid-argument-type] + ) + chs_val = float( + chs_est.loc[("loadings", 0, meas, fac), "value"] # ty: ignore[invalid-argument-type] + ) + assert np.isfinite(af_val), f"AF loading({meas},{fac}) not finite" + assert np.isfinite(chs_val), f"CHS loading({meas},{fac}) not finite" + assert abs(af_val - chs_val) < meas_tol, ( + f"loading({meas},{fac}): AF={af_val:.4f} vs CHS={chs_val:.4f}" + ) + + # --- Compare transition params (period 0->1) --- + af_p1 = af_result.period_results[1].params + trans_tol = 0.5 + + # fac2 linear: self-productivity + af_fac2_self = float( + af_p1.loc[("transition", 0, "fac2", "fac2"), "value"] # ty: ignore[invalid-argument-type] + ) + chs_fac2_self = float( + chs_est.loc[("transition", 0, "fac2", "fac2"), "value"] # ty: ignore[invalid-argument-type] + ) + assert abs(af_fac2_self - chs_fac2_self) < trans_tol, ( + f"fac2 self-prod: AF={af_fac2_self:.4f} vs CHS={chs_fac2_self:.4f}" + ) + + # All transition params should be finite + af_trans = af_p1.query("category == 'transition'") + assert af_trans["value"].apply(np.isfinite).all(), ( + f"Non-finite AF transition params:\n{af_trans}" + ) + + # AF transition params should NOT be stuck at initialisation + trans_values = af_trans["value"].to_numpy() + assert not np.allclose(trans_values, 0.5, atol=0.01), ( + "AF transition params stuck at init values" + ) + + # --- Print comparison for manual inspection --- + print("\n\nMODEL2: AF vs CHS (both estimated)") + print("=" * 70) + print(f"{'Parameter':40s} {'AF':>10s} {'CHS':>10s}") + print("-" * 70) + for idx, row in af_trans.iterrows(): + ix = tuple(idx) # ty: ignore[invalid-argument-type] + chs_loc = ("transition", ix[1], ix[2], ix[3]) + chs_v = ( + float(chs_est.loc[chs_loc, "value"]) + if chs_loc in chs_est.index + else float("nan") + ) + print( + f" trans {ix[2]:6s} {ix[3]:12s} {row['value']:10.4f} {chs_v:10.4f}" + ) + af_shocks = af_p1.query("category == 'shock_sds'") + for idx, row in af_shocks.iterrows(): + ix = tuple(idx) # ty: ignore[invalid-argument-type] + chs_loc = ("shock_sds", ix[1], ix[2], ix[3]) + chs_v = ( + float(chs_est.loc[chs_loc, "value"]) + if chs_loc in chs_est.index + else float("nan") + ) + print(f" shock {ix[2]:19s} {row['value']:10.4f} {chs_v:10.4f}") + print("-" * 70) + + +# --------------------------------------------------------------------------- +# Investment equation tests +# --------------------------------------------------------------------------- + + +@pytest.mark.end_to_end +def test_af_estimate_with_endogenous_factor() -> None: + """Verify AF estimation works with an endogenous (investment) factor. 
+ + DGP: + theta_{t+1} = 0.6 * theta_t + 0.3 * I_t + 0.05 + eta + (log_ces-like, but linear for simplicity) + I_t = 0.5 * theta_t + 0.2 * Y_t + eps_I + Skill measures: Z^s_{t,m} = intercept + loading * theta_t + noise + Investment measures: Z^I_{t,m} = intercept + loading * I_t + noise + """ + rng = np.random.default_rng(123) + n_obs, n_periods = 400, 3 + + # True parameters + true_beta_skill = 0.6 # theta on theta + true_beta_inv = 0.3 # investment on theta_next + true_trans_constant = 0.05 + true_shock_sd = 0.3 + true_inv_beta0 = 0.0 # investment intercept + true_inv_beta_theta = 0.5 # investment depends on skill + true_inv_beta_y = 0.2 # investment depends on income + true_inv_sd = 0.25 + + # Simulate + theta = np.zeros((n_obs, n_periods)) + inv = np.zeros((n_obs, n_periods)) + income = rng.normal(1.0, 0.5, n_obs) # exogenous, time-invariant + theta[:, 0] = rng.normal(0, 1, n_obs) + inv[:, 0] = ( + true_inv_beta0 + + true_inv_beta_theta * theta[:, 0] + + true_inv_beta_y * income + + rng.normal(0, true_inv_sd, n_obs) + ) + for t in range(n_periods - 1): + theta[:, t + 1] = ( + true_trans_constant + + true_beta_skill * theta[:, t] + + true_beta_inv * inv[:, t] + + rng.normal(0, true_shock_sd, n_obs) + ) + if t + 1 < n_periods: + inv[:, t + 1] = ( + true_inv_beta0 + + true_inv_beta_theta * theta[:, t + 1] + + true_inv_beta_y * income + + rng.normal(0, true_inv_sd, n_obs) + ) + + rows = [] + for i in range(n_obs): + for t in range(n_periods): + rows.append( + { + "caseid": i, + "period": t, + # Skill measures + "s1": theta[i, t] + rng.normal(0, 0.3), + "s2": 0.3 + 0.8 * theta[i, t] + rng.normal(0, 0.35), + "s3": -0.1 + 1.1 * theta[i, t] + rng.normal(0, 0.4), + # Investment measures + "i1": inv[i, t] + rng.normal(0, 0.3), + "i2": 0.2 + 0.9 * inv[i, t] + rng.normal(0, 0.35), + "i3": -0.1 + 1.2 * inv[i, t] + rng.normal(0, 0.4), + # Exogenous variable + "income": income[i], + } + ) + data = pd.DataFrame(rows).set_index(["caseid", "period"]) + + model = ModelSpec( + factors={ + "skill": FactorSpec( + measurements=(("s1", "s2", "s3"),) * n_periods, + normalizations=Normalizations( + loadings=({"s1": 1},) * n_periods, + intercepts=({"s1": 0},) * n_periods, + ), + transition_function="linear", + ), + "investment": FactorSpec( + measurements=(("i1", "i2", "i3"),) * n_periods, + normalizations=Normalizations( + loadings=({"i1": 1},) * n_periods, + intercepts=({"i1": 0},) * n_periods, + ), + transition_function="linear", + is_endogenous=True, + ), + }, + observed_factors=("income",), + estimation_options=EstimationOptions( + robust_bounds=True, + bounds_distance=0.001, + n_mixtures=1, + ), + ) + + result = estimate_af( + model_spec=model, + data=data, + af_options=AFEstimationOptions( + n_halton_points=30, + n_halton_points_shock=15, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ), + ) + + # Basic checks: estimation ran, produced results for all periods + assert len(result.period_results) == n_periods + for pr in result.period_results: + assert np.isfinite(pr.loglikelihood), ( + f"Period {pr.period}: non-finite loglik {pr.loglikelihood}" + ) + + # Period 1 should have investment equation parameters + p1 = result.period_results[1].params + inv_eq = p1.query("category == 'investment_eq'") + assert len(inv_eq) > 0, ( + "No investment_eq parameters found — endogenous factor not wired" + ) + + # Investment equation params should not be stuck at init + inv_eq_values = inv_eq["value"].to_numpy() + assert not np.allclose(inv_eq_values, 0.5, atol=0.05), ( + f"Investment eq params stuck at 
init: {inv_eq_values}" + ) + + +def test_prev_period_inv_meas_does_not_affect_transition_loglik_gradient() -> None: + """Guard inv-prev-meas invariance of the AF transition-step gradient. + + Inv-type measurements at the previous period must not contribute to + the gradient of `af_loglike_transition` w.r.t. current-step parameters. + MATLAB's reference AF likelihood (`AF_Application_One_Normal_Translog.m`, + `create_nodes_weights_12`) evaluates inv-type measurements exactly once, + at the step where the inv is generated as a current-period measurement. + They are deliberately omitted from the chained-sample importance weight + at the next transition step (`prod_inv` is commented out in the MATLAB + source). Re-evaluating them would be wrong: the chained sample carries + forward only state factors, so the previous step's inv value is no + longer available; evaluating prev-period inv measurements against the + *current* step's freshly-drawn inv would be a wrong-value comparison. + + The Python port restricts the prev-meas factor to state-factor + loadings only (using `state_factor_indices_in_latent` to slice the + columns). This test guards against future refactors that re-introduce + a parameter-dependent contribution from inv-loading rows at the + previous period: it perturbs only the inv-meas columns of + `prev_measurements` and asserts the gradient w.r.t. all current-step + parameters is unchanged. + """ + rng = np.random.default_rng(20260507) + n_obs = 5 + n_state = 1 + n_endog = 1 + n_obs_factors = 0 + n_measures = 2 # 1 skill + 1 inv at current period + n_prev_measures = 2 # 1 skill + 1 inv at prev period + n_controls = 1 # constant + n_halton = 3 + n_components = 1 + + # Loading masks: row 0 = skill meas (loads on factor 0=skills), row 1 = + # inv meas (loads on factor 1=investment). Both at current and prev. + loading_mask = jnp.array([[True, False], [False, True]]) + prev_loading_mask = jnp.array([[True, False], [False, True]]) + + measurements = jnp.array(rng.normal(size=(n_obs, n_measures))) + controls = jnp.ones((n_obs, n_controls)) + prev_measurements_a = jnp.array(rng.normal(size=(n_obs, n_prev_measures))) + # Perturb ONLY the inv-meas column (index 1) at the previous period. + inv_perturbation = jnp.array(rng.normal(size=n_obs)) + prev_measurements_b = prev_measurements_a.at[:, 1].set( # noqa: PD008 + prev_measurements_a[:, 1] + inv_perturbation + ) + prev_controls = jnp.ones((n_obs, n_controls)) + + # Prev-period measurement-system parameters (held fixed at the + # transition step in production -- they were estimated previously). + prev_loadings_flat = jnp.array([1.0, 1.0]) + prev_control_params = jnp.zeros((n_prev_measures, n_controls)) + prev_meas_sds = jnp.array([0.5, 0.4]) + + # Period-0 Schur-conditional payload: per-obs cond_means and per-component + # cond_chols (for the joint-Halton chain rebuild scheme). + cond_means = jnp.array(rng.normal(size=(n_components, n_obs, n_state))) + cond_chols = jnp.array([[[0.5]], [[0.4]]]) + cond_weights = jnp.ones((n_obs, n_components)) + prev_distribution = { + "cond_weights": cond_weights, + "cond_means": cond_means, + "cond_chols": cond_chols, + } + + # No prior chain (this is the 0->1 step). Joint Halton dim: + # n_state (z_state) + 0 prior steps + (n_shock + n_endog) current step. 
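+    # Concretely: 1 (z_state) + 0 (no prior steps) + 1 (shock) + 1 (endog) = 3
+    # columns, which matches the width of the `joint_nodes` draw below.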
+ chain_links: tuple = () + obs_factor_values_chain = jnp.zeros((n_obs, 0, n_obs_factors)) + joint_nodes = jnp.array(rng.normal(size=(n_halton, n_state + n_state + n_endog))) + joint_weights = jnp.full(n_halton, 1.0 / n_halton) + + def transition_func(full_states: jax.Array, params: jax.Array) -> jax.Array: + # Linear: theta_t = a * theta_prev + b * inv + c. Returns shape (n_state,). + return jnp.array( + [params[0] * full_states[0] + params[1] * full_states[1] + params[2]] + ) + + total_n_transition_params = 3 + n_inv_eq_params_per = 1 + n_state + n_obs_factors + total_n_inv_params = n_endog * n_inv_eq_params_per + + # Param vector layout matches `_parse_transition_params`: 3 transition + # params, 1 shock sd, 2 inv_eq params, 1 inv sd, 2 control params, 2 + # loadings, 2 meas sds = 13 entries total. + params_value = jnp.array( + [ + 0.6, + 0.3, + 0.1, + 0.4, + 0.0, + 0.5, + 0.2, + 0.0, + 0.0, + 1.0, + 1.0, + 0.3, + 0.3, + ] + ) + + state_factor_indices_in_latent = jnp.array([0], dtype=jnp.int32) + shock_factor_indices = jnp.array([0], dtype=jnp.int32) + obs_factor_values = jnp.zeros((n_obs, n_obs_factors)) + + def _ll(prev_meas: jax.Array, params: jax.Array) -> jax.Array: + return af_loglike_transition( + params, + n_state_factors=n_state, + n_endogenous_factors=n_endog, + n_measures=n_measures, + n_controls=n_controls, + measurements=measurements, + controls=controls, + loading_mask=loading_mask, + prev_measurements=prev_meas, + prev_controls=prev_controls, + prev_loading_mask=prev_loading_mask, + prev_control_params=prev_control_params, + prev_loadings_flat=prev_loadings_flat, + prev_meas_sds=prev_meas_sds, + prev_distribution=prev_distribution, + chain_links=chain_links, + obs_factor_values_chain=obs_factor_values_chain, + joint_nodes=joint_nodes, + joint_weights=joint_weights, + transition_func=transition_func, + total_n_transition_params=total_n_transition_params, + total_n_inv_params=total_n_inv_params, + n_inv_eq_params_per=n_inv_eq_params_per, + observed_factor_values=obs_factor_values, + stability_floor=1e-300, + state_factor_indices_in_latent=state_factor_indices_in_latent, + n_shock_factors=1, + shock_factor_indices=shock_factor_indices, + ) + + def loglike_a(params: jax.Array) -> jax.Array: + return _ll(prev_measurements_a, params) + + def loglike_b(params: jax.Array) -> jax.Array: + return _ll(prev_measurements_b, params) + + grad_a = jax.grad(loglike_a)(params_value) + grad_b = jax.grad(loglike_b)(params_value) + + np.testing.assert_allclose(np.asarray(grad_a), np.asarray(grad_b), atol=1e-10) + + # Sanity: with a non-zero perturbation, the inv-row residuals do change, + # so the loglik *value* itself differs (by a per-obs constant). That + # difference must NOT be zero -- otherwise the test isn't actually + # exercising the inv-loading rows. + val_a = float(loglike_a(params_value)) + val_b = float(loglike_b(params_value)) + assert not np.isclose(val_a, val_b), ( + "Test sanity failure: perturbing prev inv-meas changed nothing -- " + "the test isn't exercising the inv-loading rows." + ) + + +def test_rebuild_chain_at_period_matches_python_forward_pass() -> None: + """Unit test for `_rebuild_chain_at_period`. + + Hand-code a 2-step linear chain (1 state factor, 1 endog factor, 1 + observed factor) and assert the helper's output matches a Python + forward pass to numerical precision. Catches index/reshape bugs in + the chain-rebuild helper independently of the integrand. 
+ """ + rng = np.random.default_rng(20260507) + n_state = 1 + n_endog = 1 + n_obs_factors = 1 + n_inv_eq_params_per = 1 + n_state + n_obs_factors + + # Two prior chain steps (so we're computing θ_0 → θ_1 → θ_2). + z_state = jnp.asarray(rng.normal(size=n_state)) + z_inv_per_step = jnp.asarray(rng.normal(size=(2, n_endog))) + z_shock_per_step = jnp.asarray(rng.normal(size=(2, n_state))) + + initial_mean = jnp.asarray(rng.normal(size=n_state)) + initial_chol = jnp.asarray([[0.7]]) + + obs_factor_values_per_step = jnp.asarray(rng.normal(size=(2, n_obs_factors))) + + # Linear "transition": theta_next = a * theta + b * inv + c * obs + d. + # Wrap as the f(full_states, params) signature used in production. + def make_transition_func() -> Callable[[jax.Array, jax.Array], jax.Array]: + def fn(full_states: jax.Array, params: jax.Array) -> jax.Array: + a, b, c, d = params[0], params[1], params[2], params[3] + return jnp.array( + [a * full_states[0] + b * full_states[1] + c * full_states[2] + d] + ) + + return fn + + transition_func = make_transition_func() + + link_1 = ChainLink( + period=1, + transition_func=transition_func, + transition_params=jnp.array([0.6, 0.3, 0.05, 0.1]), + shock_sds=jnp.array([0.4]), + shock_factor_indices=jnp.array([0], dtype=jnp.int32), + inv_eq_params=jnp.array([0.0, 0.5, 0.2]), # intercept, beta_skills, beta_inc + inv_sds=jnp.array([0.25]), + n_inv_eq_params_per=n_inv_eq_params_per, + obs_factor_values=jnp.zeros((1, n_obs_factors)), # unused by helper + ) + link_2 = ChainLink( + period=2, + transition_func=transition_func, + transition_params=jnp.array([0.5, 0.4, 0.0, 0.2]), + shock_sds=jnp.array([0.3]), + shock_factor_indices=jnp.array([0], dtype=jnp.int32), + inv_eq_params=jnp.array([0.05, 0.6, 0.3]), + inv_sds=jnp.array([0.15]), + n_inv_eq_params_per=n_inv_eq_params_per, + obs_factor_values=jnp.zeros((1, n_obs_factors)), + ) + chain_links = (link_1, link_2) + + # Hand-coded forward pass. + theta_0 = initial_mean + initial_chol @ z_state + for step_idx, link in enumerate(chain_links): + z_inv = z_inv_per_step[step_idx] + z_shock = z_shock_per_step[step_idx] + obs_y = obs_factor_values_per_step[step_idx] + beta = link.inv_eq_params # (intercept, beta_skills, beta_inc) + inv_val = ( + beta[0] + + beta[1] * theta_0[0] + + beta[2] * obs_y[0] + + (link.inv_sds[0] * z_inv[0]) + ) + inv = jnp.array([inv_val]) + full = jnp.concatenate([theta_0, inv, obs_y]) + theta_next_det = transition_func(full, link.transition_params) + theta_0 = theta_next_det + jnp.array([link.shock_sds[0] * z_shock[0]]) + expected = theta_0 # θ at the last link's target period + + actual = _rebuild_chain_at_period( + z_state=z_state, + z_inv_per_step=z_inv_per_step, + z_shock_per_step=z_shock_per_step, + initial_mean=initial_mean, + initial_chol=initial_chol, + chain_links=chain_links, + obs_factor_values_at_obs_per_step=obs_factor_values_per_step, + n_state_factors=n_state, + n_endogenous_factors=n_endog, + ) + np.testing.assert_allclose(np.asarray(actual), np.asarray(expected), atol=1e-12) + + +def test_rebuild_chain_at_period_empty_chain_returns_period_0() -> None: + """Verify the empty-chain (0->1) path of `_rebuild_chain_at_period`. + + With no chain links, the helper just returns + ``initial_mean + initial_chol @ z_state``. 
+ """ + rng = np.random.default_rng(7) + n_state = 2 + z_state = jnp.asarray(rng.normal(size=n_state)) + initial_mean = jnp.asarray(rng.normal(size=n_state)) + initial_chol = jnp.asarray([[0.5, 0.0], [0.1, 0.4]]) + expected = initial_mean + initial_chol @ z_state + + actual = _rebuild_chain_at_period( + z_state=z_state, + z_inv_per_step=jnp.zeros((0, 1)), + z_shock_per_step=jnp.zeros((0, n_state)), + initial_mean=initial_mean, + initial_chol=initial_chol, + chain_links=(), + obs_factor_values_at_obs_per_step=jnp.zeros((0, 0)), + n_state_factors=n_state, + n_endogenous_factors=1, + ) + np.testing.assert_allclose(np.asarray(actual), np.asarray(expected), atol=1e-14) + + +def test_af_joint_halton_recovers_sigma_prod_argmax() -> None: # noqa: PLR0915 + """Catch regression to split-Halton: sigma_prod recovery on synthetic translog. + + With all params except sigma_prod_0 pinned at the truth, the per-obs mean + log-likelihood at sigma_prod=truth must beat sigma_prod ≈ truth/4 by at least + 1.0 nat per obs. Under the buggy split-Halton scheme the argmax sat + near sigma ≈ truth/4 with truth being WORSE; under the joint-Halton fix + the argmax aligns with truth. The empirical joint-vs-split gap on + the MATLAB sim was ~2.5 nats per obs (see + ``sim_repro/debug_joint_halton.py`` and + ``obsidian/Professional/skillmodels/sigma-prod-collapse-2026-05-07.md``); + 1.0 nat is generous headroom that still flags any return to split. + + The test calls ``af_loglike_transition`` directly with hand-built + kwargs on a tiny synthetic translog DGP (1 state factor, 1 endog + factor, 1 observed factor), so it isolates the integrand from the + optimizer and runs in ~10s. + """ + rng = np.random.default_rng(20260508) + n_obs = 200 + n_halton = 500 + n_state = 1 + n_endog = 1 + n_obs_factors = 1 + n_inv_eq_params_per = 1 + n_state + n_obs_factors + + # MATLAB-translog truth values (from set_parameters in + # AF_Simulations_Translog.m), restricted to one state factor. + a_true = 0.9283 + sigma_t_true = 0.5125 # log(skills) coef in translog + gamma_t_true = 0.6113 # log(inv) coef + delta_t_true = -0.0175 # cross coef + sigma_p_true = 0.36 + sigma_i_true = 0.10 + beta_skills_true = 0.10 + beta_inc_true = 0.90 + + # Mixture truth (matches MATLAB sim): two components on (skills, log_inc). + p_a_true = 0.62 + mu_a = jnp.array([-4.0, -2.0]) # (skills, log_inc) + cov_a = jnp.array([[0.62, 0.035], [0.035, 0.056]]) + mu_b = jnp.array([6.0, 3.0]) + cov_b = jnp.array([[0.83, 0.17], [0.17, 1.28]]) + + # Period-0 measurement system (3 skill measures). + lam_skills_0 = jnp.array([1.0, 0.36, 0.56]) + sd_skills_0 = jnp.array([0.68, 0.03, 0.08]) + # Period-1 measurement system: 3 skill measures + 3 inv measures. + lam_skills_1 = jnp.array([1.0, 0.66, 1.18]) + sd_skills_1 = jnp.array([0.51, 0.12, 0.19]) + lam_inv_1 = jnp.array([1.0, 0.84, 0.79]) + sd_inv_1 = jnp.array([0.15, 0.39, 0.47]) + + # Forward simulation of one panel. + u = rng.uniform(size=n_obs) + is_a = (u < p_a_true).astype(np.float64) + + def _draw_2d(mu: jax.Array, cov: jax.Array, n: int) -> np.ndarray: + chol = np.linalg.cholesky(np.asarray(cov)) + z = rng.normal(size=(n, 2)) + return np.asarray(mu)[None, :] + z @ chol.T + + draw_a = _draw_2d(mu_a, cov_a, n_obs) + draw_b = _draw_2d(mu_b, cov_b, n_obs) + skills_0 = is_a * draw_a[:, 0] + (1 - is_a) * draw_b[:, 0] + log_inc = is_a * draw_a[:, 1] + (1 - is_a) * draw_b[:, 1] + + # Period-0 data: z_skills_0 = lam * skills_0 + meas_noise. 
+ z_skills_0 = ( + np.asarray(lam_skills_0)[None, :] * skills_0[:, None] + + rng.normal(size=(n_obs, 3)) * np.asarray(sd_skills_0)[None, :] + ) + + # Period-0->1 transition: inv_0 = beta_sk*skills_0 + beta_inc*log_inc + sd_I*z. + inv_0_true = ( + beta_skills_true * skills_0 + + beta_inc_true * log_inc + + rng.normal(size=n_obs) * sigma_i_true + ) + skills_1 = ( + a_true + + sigma_t_true * skills_0 + + gamma_t_true * inv_0_true + + delta_t_true * skills_0 * inv_0_true + + rng.normal(size=n_obs) * sigma_p_true + ) + z_skills_1 = ( + np.asarray(lam_skills_1)[None, :] * skills_1[:, None] + + rng.normal(size=(n_obs, 3)) * np.asarray(sd_skills_1)[None, :] + ) + z_inv_1 = ( + np.asarray(lam_inv_1)[None, :] * inv_0_true[:, None] + + rng.normal(size=(n_obs, 3)) * np.asarray(sd_inv_1)[None, :] + ) + + # Period-0 cond-distribution payload (Schur conditional given log_inc). + def _schur(mu_2d: jax.Array, cov_2d: jax.Array) -> tuple[jax.Array, jax.Array]: + # skills given log_inc: cond_mean (per obs) and cond_chol (scalar). + sigma_skills_inc = cov_2d[0, 1] + var_inc = cov_2d[1, 1] + var_cond = cov_2d[0, 0] - sigma_skills_inc**2 / var_inc + cond_chol = jnp.sqrt(var_cond) + cond_means = mu_2d[0] + (sigma_skills_inc / var_inc) * ( + jnp.asarray(log_inc) - mu_2d[1] + ) + return cond_means.reshape(n_obs, 1), jnp.asarray([[cond_chol]]) + + cond_mean_a, cond_chol_a = _schur(mu_a, cov_a) + cond_mean_b, cond_chol_b = _schur(mu_b, cov_b) + cond_means = jnp.stack([cond_mean_a, cond_mean_b], axis=0) + cond_chols = jnp.stack([cond_chol_a, cond_chol_b], axis=0) + + # Per-obs Bayes posterior weights from the marginal Y density. + def _log_marg_y(mu: jax.Array, cov: jax.Array) -> jax.Array: + var_y = cov[1, 1] + return ( + -0.5 * jnp.log(2 * jnp.pi * var_y) + - 0.5 * (jnp.asarray(log_inc) - mu[1]) ** 2 / var_y + ) + + log_w_a = jnp.log(p_a_true) + _log_marg_y(mu_a, cov_a) + log_w_b = jnp.log(1.0 - p_a_true) + _log_marg_y(mu_b, cov_b) + log_w = jnp.stack([log_w_a, log_w_b], axis=-1) + cond_weights = jax.nn.softmax(log_w, axis=-1) + + prev_distribution = { + "cond_weights": cond_weights, + "cond_means": cond_means, + "cond_chols": cond_chols, + } + + # Period-1 measurement loadings: 6 measures in order (skill_1, skill_2, + # skill_3, inv_1, inv_2, inv_3) -- skill measures load on factor 0 + # (skills), inv measures load on factor 1 (investment). + n_measures = 6 + measurements = jnp.concatenate( + [jnp.asarray(z_skills_1), jnp.asarray(z_inv_1)], axis=1 + ) + loading_mask = jnp.array( + [ + [True, False], + [True, False], + [True, False], + [False, True], + [False, True], + [False, True], + ] + ) + loadings_flat_curr = jnp.concatenate([lam_skills_1, lam_inv_1]) + meas_sds_curr = jnp.concatenate([sd_skills_1, sd_inv_1]) + + # Period-0 measurement system (prev) -- 3 skill measures. + n_prev_measures = 3 + prev_measurements = jnp.asarray(z_skills_0) + prev_loading_mask = jnp.array([[True, False]] * 3) + prev_loadings_flat = lam_skills_0 + prev_meas_sds = sd_skills_0 + + # No controls (zeros). + n_controls = 1 # constant + controls = jnp.ones((n_obs, 1)) + prev_controls = jnp.ones((n_obs, 1)) + + obs_factor_values = jnp.asarray(log_inc).reshape(n_obs, 1) + + # Transition function: log-translog (matches MATLAB sim). + def transition_func(full_states: jax.Array, params: jax.Array) -> jax.Array: + # full_states = [theta, inv, log_inc]; params = [lin_skills, lin_inv, + # lin_inc, sq_skills, sq_inv, sq_inc, inter_skills_inv, + # inter_skills_inc, inter_inv_inc, constant]. 
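+ # Only the constant, the two linear terms and the skills * inv
+ # interaction enter below, i.e.
+ #   theta_next = params[9] + params[0] * skills + params[1] * inv
+ #                + params[6] * skills * inv,
+ # which matches the truth vector where every other entry is 0.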
+ skills = full_states[0] + inv = full_states[1] + return jnp.array( + [ + params[9] + + params[0] * skills + + params[1] * inv + + params[6] * skills * inv + ] + ) + + total_n_transition_params = 10 + n_per_inv = n_inv_eq_params_per + total_n_inv_params = n_endog * n_per_inv + + state_factor_indices_in_latent = jnp.array([0], dtype=jnp.int32) + shock_factor_indices = jnp.array([0], dtype=jnp.int32) + + # Param vector layout: transition (10) + shock_sds (1) + inv_eq (3) + + # inv_sds (1) + control_params (n_measures*n_controls=6) + loadings (6) + # + meas_sds (6) = 33. + transition_params_truth = jnp.array( + [ + sigma_t_true, + gamma_t_true, + 0.0, # lin coef on log_inc + 0.0, + 0.0, + 0.0, # squares + delta_t_true, # skills * inv + 0.0, + 0.0, # other interactions + a_true, + ] + ) + inv_eq_params_truth = jnp.array([0.0, beta_skills_true, beta_inc_true]) + + def _build_params(sigma_p: float) -> jax.Array: + return jnp.concatenate( + [ + transition_params_truth, + jnp.array([sigma_p]), + inv_eq_params_truth, + jnp.array([sigma_i_true]), + jnp.zeros(n_measures * n_controls), # control intercepts + loadings_flat_curr, + meas_sds_curr, + ] + ) + + def _ll(sigma_p: float) -> float: + params_value = _build_params(sigma_p) + neg_mean = af_loglike_transition( + params_value, + n_state_factors=n_state, + n_endogenous_factors=n_endog, + n_measures=n_measures, + n_controls=n_controls, + measurements=measurements, + controls=controls, + loading_mask=loading_mask, + prev_measurements=prev_measurements, + prev_controls=prev_controls, + prev_loading_mask=prev_loading_mask, + prev_control_params=jnp.zeros((n_prev_measures, n_controls)), + prev_loadings_flat=prev_loadings_flat, + prev_meas_sds=prev_meas_sds, + prev_distribution=prev_distribution, + chain_links=(), + obs_factor_values_chain=jnp.zeros((n_obs, 0, n_obs_factors)), + joint_nodes=jnp.array( + np.random.default_rng(1).normal( + size=(n_halton, n_state + n_state + n_endog) + ) + ), + joint_weights=jnp.full(n_halton, 1.0 / n_halton), + transition_func=transition_func, + total_n_transition_params=total_n_transition_params, + total_n_inv_params=total_n_inv_params, + n_inv_eq_params_per=n_per_inv, + observed_factor_values=obs_factor_values, + stability_floor=1e-300, + state_factor_indices_in_latent=state_factor_indices_in_latent, + n_shock_factors=1, + shock_factor_indices=shock_factor_indices, + ) + # Convert from neg-mean back to per-obs mean ll. + return float(-neg_mean) + + sigma_truth = sigma_p_true + sigma_wrong = 0.09 # well below truth (= truth / 4) + ll_truth = _ll(sigma_truth) + ll_wrong = _ll(sigma_wrong) + gap = ll_truth - ll_wrong + assert gap > 1.0, ( + f"Joint-Halton sigma_prod recovery REGRESSED: ll(truth={sigma_truth})=" + f"{ll_truth:.4f} should beat ll(wrong={sigma_wrong})={ll_wrong:.4f} by " + f"at least 1.0 nat per obs but gap is only {gap:.4f}. The empirical " + f"joint-vs-split gap on the MATLAB translog sim was ~2.5 nats; a gap " + f"below 1.0 here suggests the AF likelihood has reverted to the split-" + f"Halton scheme that biases sigma_prod toward 0." + ) + + +def test_af_joint_halton_recovers_sigma_prod_with_chain_link() -> None: # noqa: PLR0915 + """As above, but exercise a 1→2 step where ``chain_links`` is non-empty. + + For the 0→1 step the joint Halton dim is just `n_state + n_shock + + n_endog` and the joint-vs-split distinction is subtle (no prior + chain to bridge). 
For 1→2 steps the joint Halton couples z_state + + prior chain shocks + current shocks all in one sequence — that's + where MATLAB's working scheme actually outperforms split Halton. + + This test runs `estimate_af` end-to-end on a tiny synthetic translog + DGP through periods 0, 1, 2, then verifies the period-2 (= 1→2) + estimated sigma_prod_1 is within 35% of truth. Under split Halton + this parameter collapses toward 0; under joint Halton it recovers + near truth (0.42 in the MATLAB sim). The 35% threshold (vs split- + Halton's ~100% collapse) clearly separates the two regimes while + absorbing JAX numerical-determinism differences across CI vs local + hardware that nudged the recovered estimate from ~28% to ~31% on + the same fixed seed. + """ + pytest.importorskip("optimagic") + rng = np.random.default_rng(20260509) + n_obs = 300 + n_periods = 3 + + # MATLAB-translog truths. + a_t = (0.9283, 0.9536) + sigma_t_arr = (0.5125, 0.7295) + gamma_t_arr = (0.6113, 0.2814) + delta_t_arr = (-0.0175, -0.0024) + sigma_p_arr = (0.36, 0.42) + sigma_i_arr = (0.10, 0.10) + beta_skills = (0.10, 0.10) + beta_inc = (0.90, 0.90) + lam_skills = ( + np.array([1.0, 0.36, 0.56]), + np.array([1.0, 0.66, 1.18]), + np.array([1.0, 0.19, 0.50]), + ) + sd_skills = ( + np.array([0.68, 0.03, 0.08]), + np.array([0.51, 0.12, 0.19]), + np.array([0.14, 0.03, 0.15]), + ) + lam_inv = (np.array([1.0, 0.84, 0.79]),) * 2 + sd_inv = (np.array([0.15, 0.39, 0.47]),) * 2 + + # Initial mixture (matches MATLAB). + p_a = 0.62 + mu_a = np.array([-4.0, -2.0]) + cov_a = np.array([[0.62, 0.035], [0.035, 0.056]]) + mu_b = np.array([6.0, 3.0]) + cov_b = np.array([[0.83, 0.17], [0.17, 1.28]]) + + u = rng.uniform(size=n_obs) + is_a = (u < p_a).astype(np.float64) + chol_a = np.linalg.cholesky(cov_a) + chol_b = np.linalg.cholesky(cov_b) + z_init = rng.normal(size=(n_obs, 2)) + draw_a = mu_a[None, :] + z_init @ chol_a.T + draw_b = mu_b[None, :] + z_init @ chol_b.T + skills = np.zeros((n_obs, n_periods)) + skills[:, 0] = is_a * draw_a[:, 0] + (1 - is_a) * draw_b[:, 0] + log_inc = is_a * draw_a[:, 1] + (1 - is_a) * draw_b[:, 1] + inv = np.zeros((n_obs, n_periods - 1)) + for t in range(n_periods - 1): + inv[:, t] = ( + beta_skills[t] * skills[:, t] + + beta_inc[t] * log_inc + + rng.normal(size=n_obs) * sigma_i_arr[t] + ) + skills[:, t + 1] = ( + a_t[t] + + sigma_t_arr[t] * skills[:, t] + + gamma_t_arr[t] * inv[:, t] + + delta_t_arr[t] * skills[:, t] * inv[:, t] + + rng.normal(size=n_obs) * sigma_p_arr[t] + ) + + rows = [] + for i in range(n_obs): + for t in range(n_periods): + row = { + "caseid": int(i), + "period": int(t), + "skill_1": lam_skills[t][0] * skills[i, t] + + rng.normal() * sd_skills[t][0], + "skill_2": lam_skills[t][1] * skills[i, t] + + rng.normal() * sd_skills[t][1], + "skill_3": lam_skills[t][2] * skills[i, t] + + rng.normal() * sd_skills[t][2], + "log_income": float(log_inc[i]), + } + if 1 <= t <= 2: + inv_t_idx = t - 1 + row["inv_1"] = ( + lam_inv[inv_t_idx][0] * inv[i, inv_t_idx] + + rng.normal() * sd_inv[inv_t_idx][0] + ) + row["inv_2"] = ( + lam_inv[inv_t_idx][1] * inv[i, inv_t_idx] + + rng.normal() * sd_inv[inv_t_idx][1] + ) + row["inv_3"] = ( + lam_inv[inv_t_idx][2] * inv[i, inv_t_idx] + + rng.normal() * sd_inv[inv_t_idx][2] + ) + else: + row["inv_1"] = np.nan + row["inv_2"] = np.nan + row["inv_3"] = np.nan + rows.append(row) + data = pd.DataFrame(rows).set_index(["caseid", "period"]) + + skill_normalisations = Normalizations( + loadings=({"skill_1": 1.0},) * n_periods, + intercepts=({"skill_1": 0.0},) * n_periods, + ) + 
inv_normalisations = Normalizations( + loadings=({}, {"inv_1": 1.0}, {"inv_1": 1.0}), + intercepts=({}, {"inv_1": 0.0}, {"inv_1": 0.0}), + ) + + model = ModelSpec( + factors={ + "skills": FactorSpec( + measurements=(("skill_1", "skill_2", "skill_3"),) * n_periods, + normalizations=skill_normalisations, + transition_function="translog", + ), + "investment": FactorSpec( + measurements=( + (), + ("inv_1", "inv_2", "inv_3"), + ("inv_1", "inv_2", "inv_3"), + ), + normalizations=inv_normalisations, + transition_function="linear", + is_endogenous=True, + ), + }, + observed_factors=("log_income",), + estimation_options=EstimationOptions( + robust_bounds=True, bounds_distance=0.001, n_mixtures=2 + ), + ) + + # Pin everything except sigma_prod_0 / sigma_prod_1 at MATLAB truth. + truth_extras: list[tuple[tuple[str, int, str, str], float]] = [ + (("transition", 0, "skills", "constant"), a_t[0]), + (("transition", 0, "skills", "skills"), sigma_t_arr[0]), + (("transition", 0, "skills", "investment"), gamma_t_arr[0]), + (("transition", 0, "skills", "skills * investment"), delta_t_arr[0]), + (("transition", 1, "skills", "constant"), a_t[1]), + (("transition", 1, "skills", "skills"), sigma_t_arr[1]), + (("transition", 1, "skills", "investment"), gamma_t_arr[1]), + (("transition", 1, "skills", "skills * investment"), delta_t_arr[1]), + # Pin sigma_prod_0 at truth so we can isolate sigma_prod_1. + (("shock_sds", 0, "skills", "-"), sigma_p_arr[0]), + (("investment_eq", 0, "investment", "skills"), beta_skills[0]), + (("investment_eq", 0, "investment", "log_income"), beta_inc[0]), + (("investment_eq", 0, "investment", "constant"), 0.0), + (("investment_eq", 1, "investment", "skills"), beta_skills[1]), + (("investment_eq", 1, "investment", "log_income"), beta_inc[1]), + (("investment_eq", 1, "investment", "constant"), 0.0), + (("investment_sds", 0, "investment", "-"), sigma_i_arr[0]), + (("investment_sds", 1, "investment", "-"), sigma_i_arr[1]), + ] + # Pin all squares + log_income terms in translog to 0. + for t in range(n_periods - 1): + for fac in ("skills", "investment", "log_income"): + truth_extras.append((("transition", t, "skills", f"{fac} ** 2"), 0.0)) + truth_extras.append((("transition", t, "skills", "log_income"), 0.0)) + for cross in ("skills * log_income", "investment * log_income"): + truth_extras.append((("transition", t, "skills", cross), 0.0)) + + fixed_idx = pd.MultiIndex.from_tuples( + [r[0] for r in truth_extras], + names=["category", "period", "name1", "name2"], + ) + fixed_params = pd.DataFrame( + {"value": [r[1] for r in truth_extras]}, index=fixed_idx + ) + + truth_df = pd.DataFrame({"value": [v for _, v in truth_extras]}, index=fixed_idx) + + af_opts = AFEstimationOptions( + n_halton_points=200, + n_halton_points_shock=200, + n_mixture_components=2, + optimizer_algorithm="scipy_lbfgsb", + ) + result = estimate_af( + model_spec=model, + data=data, + af_options=af_opts, + fixed_params=fixed_params, + start_params=truth_df, + ) + p2 = result.period_results[2].params + sigma_prod_1_est = float( + p2.loc[("shock_sds", 1, "skills", "-"), "value"] # ty: ignore[invalid-argument-type] + ) + rel_err = abs(sigma_prod_1_est - sigma_p_arr[1]) / sigma_p_arr[1] + assert rel_err < 0.35, ( + f"sigma_prod_1 estimate {sigma_prod_1_est:.4f} is more than 35% off truth " + f"{sigma_p_arr[1]:.4f} (rel error {rel_err:.2%}). Suggests joint-Halton " + f"chain rebuild has regressed and sigma_prod is collapsing toward 0." 
+ ) + + +# --------------------------------------------------------------------------- +# Posterior states tests +# --------------------------------------------------------------------------- + + +@pytest.mark.end_to_end +def test_af_get_filtered_states() -> None: + """Verify get_filtered_states works with AF results. + + Run AF on a simple single-factor model, then call get_filtered_states + with the AF result. Check the returned DataFrame has the right shape, + columns, and reasonable values. + """ + data, _true_params = _simulate_linear_transition_data(n_obs=200, n_periods=3) + model = _make_linear_transition_model(n_periods=3) + + af_result = estimate_af( + model_spec=model, + data=data, + af_options=AFEstimationOptions( + n_halton_points=30, + n_halton_points_shock=15, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ), + ) + + result = get_filtered_states( + model_spec=model, + data=data, + params=af_result.all_params, + af_result=af_result, + ) + + # Should have unanchored_states + assert "unanchored_states" in result + states_df = result["unanchored_states"]["states"] + + # DataFrame should have id, period, and factor columns + assert "period" in states_df.columns + assert "skill" in states_df.columns + + # One row per individual per period + n_obs = 200 + n_periods = 3 + assert len(states_df) == n_obs * n_periods + + # Values should be finite + assert states_df["skill"].apply(np.isfinite).all() + + # State estimates should have non-trivial variance (not all the same) + assert states_df["skill"].std() > 0.1 + + +@pytest.mark.end_to_end +def test_af_estimate_with_translog() -> None: + """Verify AF estimation runs with a translog transition function. + + Simulate from a linear DGP but estimate with translog — translog nests + linear (squares and interactions zero), so estimation should still + converge to a finite likelihood and recover the linear coefficient + roughly. With one factor there are only 3 translog params: beta, beta^2, + constant. + """ + data, _true_params = _simulate_linear_transition_data(n_obs=300, n_periods=3) + model = ModelSpec( + factors={ + "skill": FactorSpec( + measurements=(("m1", "m2", "m3"),) * 3, + normalizations=Normalizations( + loadings=({"m1": 1},) * 3, + intercepts=({"m1": 0},) * 3, + ), + transition_function="translog", + ), + }, + estimation_options=EstimationOptions( + robust_bounds=True, + bounds_distance=0.001, + n_mixtures=1, + ), + ) + + result = estimate_af( + model_spec=model, + data=data, + af_options=AFEstimationOptions( + n_halton_points=30, + n_halton_points_shock=15, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ), + ) + + assert len(result.period_results) == 3 + for pr in result.period_results: + assert np.isfinite(pr.loglikelihood), ( + f"Period {pr.period}: non-finite loglik {pr.loglikelihood}" + ) + + # Period 1 should have 3 translog transition params: skill, skill ** 2, constant + p1 = result.period_results[1].params + trans = p1.query("category == 'transition'") + param_names = set(trans.index.get_level_values("name2")) + assert {"skill", "skill ** 2", "constant"}.issubset(param_names), ( + f"Expected translog params skill, skill ** 2, constant; got {param_names}" + ) + + # Linear coefficient should be recovered roughly (true beta = 0.8). + # Tolerance is wide because translog overfits with squared term. 
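+ # The DGP is linear, so the true "skill ** 2" coefficient is 0; in a
+ # finite sample some of the linear signal can leak into the free squared
+ # and constant terms, hence the wide band around 0.8.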
+ est_beta = float( + p1.loc[("transition", 0, "skill", "skill"), "value"] # ty: ignore[invalid-argument-type] + ) + assert abs(est_beta - 0.8) < 0.4, ( + f"translog skill coefficient: got {est_beta:.3f}, expected ≈ 0.8" + ) + + +@pytest.mark.end_to_end +def test_af_joint_initial_distribution_with_observed_factor() -> None: + """Verify the joint (latent, observed) initial distribution is estimated. + + When observed factors are specified, the initial period estimator models + the joint (latent, observed) distribution and conditions Halton draws on + observed values per the Schur complement (Antweiler & Freyberger 2025). + + This test constructs data with a latent skill strongly correlated with + observed income, runs AF, and verifies: + - The estimated initial_states includes an entry for the observed factor. + - The recovered mean of the observed factor is close to its sample mean. + - The covariance between latent and observed has the expected sign. + """ + rng = np.random.default_rng(2026) + n_obs, n_periods = 400, 2 + true_corr = 0.7 # strong latent-observed correlation + + # Jointly simulate skill and income with specified correlation + z = rng.multivariate_normal( + mean=[0.0, 1.0], + cov=[[1.0, true_corr * 0.5], [true_corr * 0.5, 0.25]], + size=n_obs, + ) + theta = z[:, 0] + income = z[:, 1] + + rows = [] + for i in range(n_obs): + for t in range(n_periods): + rows.append( + { + "caseid": i, + "period": t, + "s1": theta[i] + rng.normal(0, 0.3), + "s2": 0.3 + 0.9 * theta[i] + rng.normal(0, 0.35), + "s3": -0.1 + 1.1 * theta[i] + rng.normal(0, 0.4), + "income": income[i], + } + ) + data = pd.DataFrame(rows).set_index(["caseid", "period"]) + + model = ModelSpec( + factors={ + "skill": FactorSpec( + measurements=(("s1", "s2", "s3"),) * n_periods, + normalizations=Normalizations( + loadings=({"s1": 1},) * n_periods, + intercepts=({"s1": 0},) * n_periods, + ), + transition_function="linear", + ), + }, + observed_factors=("income",), + estimation_options=EstimationOptions( + robust_bounds=True, + bounds_distance=0.001, + n_mixtures=1, + ), + ) + + result = estimate_af( + model_spec=model, + data=data, + af_options=AFEstimationOptions( + n_halton_points=40, + n_halton_points_shock=15, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ), + ) + + p0 = result.period_results[0].params + + # initial_states must now include an entry for the observed factor + income_mean_loc = ("initial_states", 0, "mixture_0", "income") + assert income_mean_loc in p0.index, ( + "initial_states should include the observed factor 'income'" + ) + est_income_mean = float(p0.loc[income_mean_loc, "value"]) # ty: ignore[invalid-argument-type] + sample_income_mean = float(income.mean()) + assert abs(est_income_mean - sample_income_mean) < 0.15, ( + f"Estimated income mean {est_income_mean:.3f} far from sample " + f"{sample_income_mean:.3f}." + ) + + # Cross-covariance entry (skill-income) should reflect the positive + # correlation in the DGP; stored as lower-triangular Cholesky with + # factor ordering (latent, observed). + cross_loc = ("initial_cholcovs", 0, "mixture_0", "income-skill") + assert cross_loc in p0.index, ( + "Cross Cholesky entry between skill and income should be present" + ) + # For a 2x2 joint Cholesky with positive cross-cov, the (1,0) entry + # should be positive. 
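+ # (With cov = chol @ chol.T and chol lower triangular, cov[1, 0] equals
+ # chol[1, 0] * chol[0, 0] with chol[0, 0] > 0, so the off-diagonal
+ # Cholesky entry carries the sign of the skill-income covariance. The DGP
+ # covariance is 0.35, which would put chol[1, 0] near 0.35 here.)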
+ cross_val = float(p0.loc[cross_loc, "value"]) # ty: ignore[invalid-argument-type] + assert cross_val > 0.05, ( + f"Expected positive skill-income covariance; got Cholesky[1,0]={cross_val:.3f}" + ) + + +@pytest.mark.end_to_end +def test_af_fixed_params_pins_time_invariant_latent() -> None: + """Verify fixed_params pins MC-style time-invariant latent factors. + + Construct a 2-factor model where `mc` is time-invariant and `skill` + evolves linearly. Pin mc's transitions to identity and its shock SD + to a near-zero floor (same convention CHS uses for augmented periods). + After estimation, the pinned parameters must equal the input values + exactly (not optimized away). + """ + rng = np.random.default_rng(7) + n_obs, n_periods = 300, 3 + mc = rng.normal(0, 1, n_obs) + theta = np.zeros((n_obs, n_periods)) + theta[:, 0] = rng.normal(0, 1, n_obs) + for t in range(n_periods - 1): + theta[:, t + 1] = 0.7 * theta[:, t] + 0.2 * mc + rng.normal(0, 0.3, n_obs) + + rows = [] + for i in range(n_obs): + for t in range(n_periods): + row = { + "caseid": i, + "period": t, + "s1": theta[i, t] + rng.normal(0, 0.3), + "s2": 0.3 + 0.9 * theta[i, t] + rng.normal(0, 0.35), + "s3": -0.1 + 1.1 * theta[i, t] + rng.normal(0, 0.4), + } + if t == 0: + row["m1"] = mc[i] + rng.normal(0, 0.3) + row["m2"] = 0.2 + 0.8 * mc[i] + rng.normal(0, 0.35) + row["m3"] = -0.1 + 1.1 * mc[i] + rng.normal(0, 0.4) + rows.append(row) + data = pd.DataFrame(rows).set_index(["caseid", "period"]) + + model = ModelSpec( + factors={ + "skill": FactorSpec( + measurements=(("s1", "s2", "s3"),) * n_periods, + normalizations=Normalizations( + loadings=({"s1": 1},) * n_periods, + intercepts=({"s1": 0},) * n_periods, + ), + transition_function="linear", + ), + "mc": FactorSpec( + measurements=(("m1", "m2", "m3"), (), ()), + normalizations=Normalizations( + loadings=({"m1": 1}, {}, {}), + intercepts=({"m1": 0}, {}, {}), + ), + transition_function="linear", + ), + }, + estimation_options=EstimationOptions( + robust_bounds=True, + bounds_distance=0.001, + n_mixtures=1, + ), + ) + + # Pin mc to identity transition + floor shock SD across both + # transition periods (0 and 1). 
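+ # The loop below expands to entries such as ("transition", 0, "mc", "mc")
+ # -> 1.0, ("transition", 0, "mc", "skill") -> 0.0 and
+ # ("shock_sds", 0, "mc", "-") -> 0.001, repeated for transition period 1.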
+ fixed_entries: list[tuple[tuple[str, int, str, str], float]] = [] + for t in (0, 1): + for reg in ("skill", "mc", "constant"): + fixed_entries.append( + (("transition", t, "mc", reg), 1.0 if reg == "mc" else 0.0) + ) + fixed_entries.append((("shock_sds", t, "mc", "-"), 0.001)) + fixed_idx = pd.MultiIndex.from_tuples( + [e[0] for e in fixed_entries], + names=["category", "period", "name1", "name2"], + ) + fixed_df = pd.DataFrame({"value": [e[1] for e in fixed_entries]}, index=fixed_idx) + + result = estimate_af( + model_spec=model, + data=data, + af_options=AFEstimationOptions( + n_halton_points=30, + n_halton_points_shock=15, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ), + fixed_params=fixed_df, + ) + + for t_trans in (0, 1): + p_t = result.period_results[t_trans + 1].params + for reg in ("skill", "mc", "constant"): + expected = 1.0 if reg == "mc" else 0.0 + val = float( + p_t.loc[("transition", t_trans, "mc", reg), "value"] # ty: ignore[invalid-argument-type] + ) + assert val == expected, ( + f"mc transition period {t_trans}, regressor {reg}: " + f"expected {expected}, got {val}" + ) + sd = float( + p_t.loc[("shock_sds", t_trans, "mc", "-"), "value"] # ty: ignore[invalid-argument-type] + ) + assert sd == 0.001, f"mc shock_sd period {t_trans}: {sd} (expected 0.001)" + + +def _make_three_factor_log_ces_model( + n_periods: int, +) -> tuple[ModelSpec, pd.DataFrame]: + """Build a 3-factor model with log_ces on fac1 and simulated data. + + fac1 is produced via CES from (fac1, fac2, fac3). In the DGP we mute + fac3's contribution so tests can recover the pinning without fighting a + strong signal from that factor. + """ + rng = np.random.default_rng(17) + n_obs = 250 + + fac1 = np.zeros((n_obs, n_periods)) + fac2 = np.zeros((n_obs, n_periods)) + fac3 = np.zeros((n_obs, n_periods)) + fac1[:, 0] = rng.normal(0.5, 0.2, n_obs) + fac2[:, 0] = rng.normal(0.5, 0.2, n_obs) + fac3[:, 0] = rng.normal(0.0, 0.2, n_obs) + for t in range(n_periods - 1): + fac1[:, t + 1] = 0.4 * fac1[:, t] + 0.6 * fac2[:, t] + rng.normal(0, 0.1, n_obs) + fac2[:, t + 1] = 0.9 * fac2[:, t] + rng.normal(0, 0.1, n_obs) + fac3[:, t + 1] = fac3[:, t] + + rows = [] + for i in range(n_obs): + for t in range(n_periods): + rows.append( + { + "caseid": i, + "period": t, + "y1": fac1[i, t] + rng.normal(0, 0.1), + "y2": 0.5 + 0.8 * fac1[i, t] + rng.normal(0, 0.12), + "y3": -0.2 + 1.1 * fac1[i, t] + rng.normal(0, 0.1), + "y4": fac2[i, t] + rng.normal(0, 0.1), + "y5": 0.2 + 0.9 * fac2[i, t] + rng.normal(0, 0.12), + "y6": -0.1 + 1.1 * fac2[i, t] + rng.normal(0, 0.1), + "y7": fac3[i, t] + rng.normal(0, 0.1), + "y8": 0.1 + 0.9 * fac3[i, t] + rng.normal(0, 0.12), + "y9": -0.1 + 1.0 * fac3[i, t] + rng.normal(0, 0.1), + } + ) + data = pd.DataFrame(rows).set_index(["caseid", "period"]) + + model = ModelSpec( + factors={ + "fac1": FactorSpec( + measurements=(("y1", "y2", "y3"),) * n_periods, + normalizations=Normalizations( + loadings=({"y1": 1},) * n_periods, + intercepts=({"y1": 0},) * n_periods, + ), + transition_function="log_ces", + ), + "fac2": FactorSpec( + measurements=(("y4", "y5", "y6"),) * n_periods, + normalizations=Normalizations( + loadings=({"y4": 1},) * n_periods, + intercepts=({"y4": 0},) * n_periods, + ), + transition_function="linear", + ), + "fac3": FactorSpec( + measurements=(("y7", "y8", "y9"),) * n_periods, + normalizations=Normalizations( + loadings=({"y7": 1},) * n_periods, + intercepts=({"y7": 0},) * n_periods, + ), + transition_function="linear", + ), + }, + estimation_options=EstimationOptions( + 
robust_bounds=True, + bounds_distance=0.001, + n_mixtures=1, + ), + ) + return model, data + + +@pytest.mark.end_to_end +def test_af_log_ces_with_cross_factor_gamma_fixed_at_zero() -> None: + """Fix gamma_fac3 = 0 in a log_ces transition and run AF end-to-end. + + Before the probability-constraint + fixed-params support was added, this + combination raised `InvalidConstraintError` because optimagic refused + any fix inside a ProbabilityConstraint selector. Now the fold helper + removes gamma_fac3 from the selector and the remaining two gammas are + optimised on the simplex summing to one. + """ + model, data = _make_three_factor_log_ces_model(n_periods=2) + + fixed_idx = pd.MultiIndex.from_tuples( + [("transition", 0, "fac1", "fac3")], + names=["category", "period", "name1", "name2"], + ) + fixed_df = pd.DataFrame({"value": [0.0]}, index=fixed_idx) + + result = estimate_af( + model_spec=model, + data=data, + af_options=AFEstimationOptions( + n_halton_points=20, + n_halton_points_shock=10, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ), + fixed_params=fixed_df, + ) + + p_t = result.period_results[1].params + gamma_fac1 = float( + p_t.loc[("transition", 0, "fac1", "fac1"), "value"] # ty: ignore[invalid-argument-type] + ) + gamma_fac2 = float( + p_t.loc[("transition", 0, "fac1", "fac2"), "value"] # ty: ignore[invalid-argument-type] + ) + gamma_fac3 = float( + p_t.loc[("transition", 0, "fac1", "fac3"), "value"] # ty: ignore[invalid-argument-type] + ) + + assert gamma_fac3 == 0.0 + assert np.isclose(gamma_fac1 + gamma_fac2, 1.0, atol=1e-6) + assert gamma_fac1 > 0.0 + assert gamma_fac2 > 0.0 + + +@pytest.mark.end_to_end +def test_af_log_ces_with_cross_factor_gamma_fixed_at_nonzero() -> None: + """Fix gamma_fac3 = 0.2; verify remaining gammas sum to 0.8 at the optimum.""" + model, data = _make_three_factor_log_ces_model(n_periods=2) + + fixed_idx = pd.MultiIndex.from_tuples( + [("transition", 0, "fac1", "fac3")], + names=["category", "period", "name1", "name2"], + ) + fixed_df = pd.DataFrame({"value": [0.2]}, index=fixed_idx) + + result = estimate_af( + model_spec=model, + data=data, + af_options=AFEstimationOptions( + n_halton_points=20, + n_halton_points_shock=10, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ), + fixed_params=fixed_df, + ) + + p_t = result.period_results[1].params + gamma_fac1 = float( + p_t.loc[("transition", 0, "fac1", "fac1"), "value"] # ty: ignore[invalid-argument-type] + ) + gamma_fac2 = float( + p_t.loc[("transition", 0, "fac1", "fac2"), "value"] # ty: ignore[invalid-argument-type] + ) + gamma_fac3 = float( + p_t.loc[("transition", 0, "fac1", "fac3"), "value"] # ty: ignore[invalid-argument-type] + ) + + assert gamma_fac3 == 0.2 + assert np.isclose(gamma_fac1 + gamma_fac2, 0.8, atol=1e-6) + + +@pytest.mark.end_to_end +def test_af_estimate_tolerates_nan_measurements() -> None: + """NaN entries in measurement columns must not poison AF gradients. + + Real panels routinely have missing values; the AF likelihood masks + them out at the per-observation level so each observation contributes + only its non-missing measurements to the log-pdf sum. 
+ """ + rng = np.random.default_rng(2026) + n_obs, n_periods = 400, 2 + + z = rng.multivariate_normal( + mean=[0.0, 1.0], + cov=[[1.0, 0.35], [0.35, 0.25]], + size=n_obs, + ) + theta = z[:, 0] + income = z[:, 1] + + rows = [] + for i in range(n_obs): + for t in range(n_periods): + row = { + "caseid": i, + "period": t, + "s1": theta[i] + rng.normal(0, 0.3), + "s2": 0.3 + 0.9 * theta[i] + rng.normal(0, 0.35), + "s3": -0.1 + 1.1 * theta[i] + rng.normal(0, 0.4), + "income": income[i], + } + # Sprinkle ~10% NaN into s2 across both periods. + if rng.random() < 0.10: + row["s2"] = np.nan + rows.append(row) + data = pd.DataFrame(rows).set_index(["caseid", "period"]) + assert data["s2"].isna().any(), "test setup should inject NaN measurements" + + model = ModelSpec( + factors={ + "skill": FactorSpec( + measurements=(("s1", "s2", "s3"),) * n_periods, + normalizations=Normalizations( + loadings=({"s1": 1},) * n_periods, + intercepts=({"s1": 0},) * n_periods, + ), + transition_function="linear", + ), + }, + observed_factors=("income",), + estimation_options=EstimationOptions( + robust_bounds=True, bounds_distance=0.001, n_mixtures=1 + ), + ) + + result = estimate_af( + model_spec=model, + data=data, + af_options=AFEstimationOptions( + n_halton_points=30, + n_halton_points_shock=15, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ), + ) + for pr in result.period_results: + assert pr.success, f"Period {pr.period} failed with NaN measurements" + assert np.isfinite(pr.loglikelihood) + + +@pytest.mark.end_to_end +def test_af_estimate_with_register_params_user_transition() -> None: + """AF must accept `@register_params`-decorated user transition functions. + + User-defined transition functions take individual factor arguments + plus a `params` dict; AF's per-period likelihood passes a packed + state vector and a flat parameter slice. Without the bridging + wrapper in `_get_raw_transition_functions`, callers that supply + custom transitions (e.g. `skane-struct-bw`) raise TypeError at the + first transition-step call. 
+ """ + + @register_params(params=["constant", "skill"]) + def f_skill(skill: jax.Array, params: dict[str, float]) -> jax.Array: + return params["constant"] + params["skill"] * skill + + rng = np.random.default_rng(2026) + n_obs, n_periods = 300, 3 + theta = rng.normal(0, 1, (n_obs, n_periods)) + for t in range(1, n_periods): + theta[:, t] = 0.1 + 0.8 * theta[:, t - 1] + rng.normal(0, 0.4, n_obs) + + rows = [] + for i in range(n_obs): + for t in range(n_periods): + rows.append( + { + "caseid": i, + "period": t, + "s1": theta[i, t] + rng.normal(0, 0.3), + "s2": 0.3 + 0.9 * theta[i, t] + rng.normal(0, 0.35), + "s3": -0.1 + 1.1 * theta[i, t] + rng.normal(0, 0.4), + } + ) + data = pd.DataFrame(rows).set_index(["caseid", "period"]) + + model = ModelSpec( + factors={ + "skill": FactorSpec( + measurements=(("s1", "s2", "s3"),) * n_periods, + normalizations=Normalizations( + loadings=({"s1": 1},) * n_periods, + intercepts=({"s1": 0},) * n_periods, + ), + transition_function=f_skill, + ), + }, + estimation_options=EstimationOptions( + robust_bounds=True, bounds_distance=0.001, n_mixtures=1 + ), + ) + + result = estimate_af( + model_spec=model, + data=data, + af_options=AFEstimationOptions( + n_halton_points=30, + n_halton_points_shock=15, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ), + ) + for pr in result.period_results: + assert pr.success, f"Period {pr.period} failed" + assert np.isfinite(pr.loglikelihood) + + +def test_af_result_is_numpy_only_and_drops_samples_per_component() -> None: + """`estimate_af` returns a numpy-only, pickle-friendly result. + + Two related concerns: + + * `samples_per_component` -- per-period (n_halton, n_obs, n_state) + importance buffers used only for internal chain construction -- + must be cleared. At realistic problem sizes they are multiple GB + per period. + * Every other `jax.Array` in the result (`MixtureComponent.mean`, + `chol_cov`, `ConditionalDistribution.cond_means`, `cond_chols`, + `conditional_weights`, `mixture_weights`, and the arrays inside + every `ChainLink`) must be materialised as `np.ndarray`. JAX + arrays bind to GPU memory; if a user pickles the result while + JIT caches still occupy most of the device, `__reduce__` triggers + a GPU→host materialisation that OOMs. 
+ """ + rng = np.random.default_rng(2026) + n_obs, n_periods = 200, 2 + theta = rng.normal(0, 1, (n_obs, n_periods)) + for t in range(1, n_periods): + theta[:, t] = 0.1 + 0.8 * theta[:, t - 1] + rng.normal(0, 0.4, n_obs) + + rows = [] + for i in range(n_obs): + for t in range(n_periods): + rows.append( + { + "caseid": i, + "period": t, + "s1": theta[i, t] + rng.normal(0, 0.3), + "s2": 0.3 + 0.9 * theta[i, t] + rng.normal(0, 0.35), + "s3": -0.1 + 1.1 * theta[i, t] + rng.normal(0, 0.4), + } + ) + data = pd.DataFrame(rows).set_index(["caseid", "period"]) + + model = ModelSpec( + factors={ + "skill": FactorSpec( + measurements=(("s1", "s2", "s3"),) * n_periods, + normalizations=Normalizations( + loadings=({"s1": 1},) * n_periods, + intercepts=({"s1": 0},) * n_periods, + ), + transition_function="linear", + ), + }, + estimation_options=EstimationOptions( + robust_bounds=True, bounds_distance=0.001, n_mixtures=1 + ), + ) + + result = estimate_af( + model_spec=model, + data=data, + af_options=AFEstimationOptions( + n_halton_points=20, + n_halton_points_shock=10, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ), + ) + + def _assert_numpy(arr: object, label: str) -> None: + if arr is None: + return + assert isinstance(arr, np.ndarray), ( + f"{label} should be a numpy ndarray, got {type(arr).__name__}" + ) + + for cd in result.conditional_distributions: + assert cd.samples_per_component == (), ( + "samples_per_component should be cleared before returning" + ) + _assert_numpy(cd.mixture_weights, "mixture_weights") + _assert_numpy(cd.conditional_weights, "conditional_weights") + _assert_numpy(cd.cond_means, "cond_means") + _assert_numpy(cd.cond_chols, "cond_chols") + for component in cd.components: + _assert_numpy(component.mean, "MixtureComponent.mean") + _assert_numpy(component.chol_cov, "MixtureComponent.chol_cov") + for cl in cd.chain_links: + _assert_numpy(cl.transition_params, "ChainLink.transition_params") + _assert_numpy(cl.shock_sds, "ChainLink.shock_sds") + _assert_numpy(cl.shock_factor_indices, "ChainLink.shock_factor_indices") + _assert_numpy(cl.inv_eq_params, "ChainLink.inv_eq_params") + _assert_numpy(cl.inv_sds, "ChainLink.inv_sds") + _assert_numpy(cl.obs_factor_values, "ChainLink.obs_factor_values") diff --git a/tests/test_af_inference.py b/tests/test_af_inference.py new file mode 100644 index 00000000..7ac8fd57 --- /dev/null +++ b/tests/test_af_inference.py @@ -0,0 +1,252 @@ +"""Tests for ``skillmodels.af.inference.compute_af_standard_errors``. + +The AF inference path is the score bootstrap of Antweiler & Freyberger +(2025) §4.2 (Armstrong-Bertanha-Hong 2014 style). There is no +analytical sandwich path: AF §4.2 explicitly notes the closed-form +variance ignores estimation error in earlier-period nuisance +parameters and is therefore incorrect for any t >= 1. 
+""" + +import numpy as np +import pandas as pd +import pytest + +from skillmodels.af.estimate import estimate_af +from skillmodels.af.inference import ( + AFInferenceResult, + compute_af_standard_errors, +) +from skillmodels.af.types import AFEstimationOptions +from skillmodels.common.model_spec import ( + EstimationOptions, + FactorSpec, + ModelSpec, + Normalizations, +) + + +def _simulate_linear_data( + *, + n_obs: int, + n_periods: int = 2, + seed: int = 0, +) -> pd.DataFrame: + """Simulate a simple single-factor linear-transition panel.""" + rng = np.random.default_rng(seed) + theta = np.zeros((n_obs, n_periods)) + theta[:, 0] = rng.normal(0.0, 1.0, n_obs) + for t in range(n_periods - 1): + theta[:, t + 1] = 0.1 + 0.7 * theta[:, t] + rng.normal(0.0, 0.3, n_obs) + + loadings = (1.0, 0.9, 1.1) + intercepts = (0.0, 0.2, -0.1) + sds = (0.3, 0.4, 0.35) + rows = [] + for i in range(n_obs): + for t in range(n_periods): + row = {"caseid": i, "period": t} + for m_idx, meas in enumerate(("m1", "m2", "m3")): + row[meas] = ( + intercepts[m_idx] + + loadings[m_idx] * theta[i, t] + + rng.normal(0, sds[m_idx]) + ) + rows.append(row) + + return pd.DataFrame(rows).set_index(["caseid", "period"]) + + +def _make_linear_model(n_periods: int = 2) -> ModelSpec: + return ModelSpec( + factors={ + "skill": FactorSpec( + measurements=(("m1", "m2", "m3"),) * n_periods, + normalizations=Normalizations( + loadings=({"m1": 1},) * n_periods, + intercepts=({"m1": 0},) * n_periods, + ), + transition_function="linear", + ), + }, + estimation_options=EstimationOptions( + robust_bounds=True, + bounds_distance=0.001, + n_mixtures=1, + ), + ) + + +@pytest.fixture(scope="module") +def fitted_result() -> tuple[AFInferenceResult, pd.DataFrame]: + """Fit the AF estimator once and bootstrap SEs; reused across tests.""" + data = _simulate_linear_data(n_obs=400, n_periods=2) + model = _make_linear_model(n_periods=2) + af_opts = AFEstimationOptions( + n_halton_points=25, + n_halton_points_shock=15, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ) + fit = estimate_af(model_spec=model, data=data, af_options=af_opts) + inference = compute_af_standard_errors(fit, data, af_opts, n_boot=2000, seed=0) + return inference, fit.all_params + + +@pytest.mark.end_to_end +def test_af_inference_result_is_inference_dataclass( + fitted_result: tuple[AFInferenceResult, pd.DataFrame], +) -> None: + inference, _ = fitted_result + assert isinstance(inference, AFInferenceResult) + + +@pytest.mark.end_to_end +def test_af_inference_replicate_params_shape( + fitted_result: tuple[AFInferenceResult, pd.DataFrame], +) -> None: + inference, all_params = fitted_result + assert inference.n_boot == 2000 + assert inference.n_clusters == 400 + assert inference.replicate_params.shape == (2000, len(all_params.index)) + assert list(inference.replicate_params.columns) == list(all_params.index) + + +@pytest.mark.end_to_end +def test_af_inference_standard_errors_index_matches_params( + fitted_result: tuple[AFInferenceResult, pd.DataFrame], +) -> None: + inference, all_params = fitted_result + assert inference.standard_errors.index.equals(all_params.index) + + +@pytest.mark.end_to_end +def test_af_inference_vcov_row_index_matches_params( + fitted_result: tuple[AFInferenceResult, pd.DataFrame], +) -> None: + inference, all_params = fitted_result + assert inference.vcov.index.equals(all_params.index) + + +@pytest.mark.end_to_end +def test_af_inference_vcov_column_index_matches_params( + fitted_result: tuple[AFInferenceResult, pd.DataFrame], +) -> None: 
+ inference, all_params = fitted_result + assert inference.vcov.columns.equals(all_params.index) + + +@pytest.mark.end_to_end +def test_af_inference_vcov_diagonal_matches_se_squared( + fitted_result: tuple[AFInferenceResult, pd.DataFrame], +) -> None: + """SEs and vcov are computed from the same replicate distribution.""" + inference, _ = fitted_result + diag = np.diag(inference.vcov.to_numpy()) + se_squared = inference.standard_errors.to_numpy() ** 2 + np.testing.assert_allclose(diag, se_squared, rtol=1e-10, atol=1e-12) + + +@pytest.mark.end_to_end +def test_af_inference_pinned_loading_has_zero_se( + fitted_result: tuple[AFInferenceResult, pd.DataFrame], +) -> None: + inference, _ = fitted_result + assert float(inference.standard_errors.loc[("loadings", 0, "m1", "skill")]) == ( + pytest.approx(0.0, abs=1e-12) + ) + + +@pytest.mark.end_to_end +def test_af_inference_pinned_intercept_has_zero_se( + fitted_result: tuple[AFInferenceResult, pd.DataFrame], +) -> None: + inference, _ = fitted_result + assert float( + inference.standard_errors.loc[("controls", 0, "m1", "constant")] + ) == pytest.approx(0.0, abs=1e-12) + + +@pytest.mark.end_to_end +def test_af_inference_free_loading_has_positive_se( + fitted_result: tuple[AFInferenceResult, pd.DataFrame], +) -> None: + inference, _ = fitted_result + assert inference.standard_errors.loc[("loadings", 0, "m2", "skill")] > 0.0 + + +@pytest.mark.end_to_end +def test_af_inference_free_meas_sd_has_positive_se( + fitted_result: tuple[AFInferenceResult, pd.DataFrame], +) -> None: + inference, _ = fitted_result + assert inference.standard_errors.loc[("meas_sds", 0, "m2", "-")] > 0.0 + + +@pytest.mark.end_to_end +def test_af_inference_vcov_is_symmetric( + fitted_result: tuple[AFInferenceResult, pd.DataFrame], +) -> None: + inference, _ = fitted_result + v = inference.vcov.to_numpy() + np.testing.assert_allclose(v, v.T, atol=1e-10) + + +@pytest.mark.end_to_end +def test_af_inference_vcov_diagonal_nonnegative( + fitted_result: tuple[AFInferenceResult, pd.DataFrame], +) -> None: + inference, _ = fitted_result + diag = np.diag(inference.vcov.to_numpy()) + assert np.all(diag >= 0.0) + + +@pytest.mark.end_to_end +def test_af_inference_pinned_params_have_constant_replicates( + fitted_result: tuple[AFInferenceResult, pd.DataFrame], +) -> None: + """Loadings/intercepts pinned via Normalizations are constant across replicates.""" + inference, _ = fitted_result + pinned = [("loadings", t, "m1", "skill") for t in (0, 1)] + [ + ("controls", t, "m1", "constant") for t in (0, 1) + ] + for loc in pinned: + if loc in inference.replicate_params.columns: + col = inference.replicate_params[loc].to_numpy() + assert col.std() == pytest.approx(0.0, abs=1e-12) + + +@pytest.mark.end_to_end +def test_af_inference_se_shrinks_with_sample_size() -> None: + """SE for a representative free parameter should shrink roughly as 1/sqrt(n).""" + model = _make_linear_model(n_periods=2) + af_opts = AFEstimationOptions( + n_halton_points=25, + n_halton_points_shock=15, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ) + + data_small = _simulate_linear_data(n_obs=200, n_periods=2, seed=1) + data_large = _simulate_linear_data(n_obs=800, n_periods=2, seed=1) + + fit_small = estimate_af(model_spec=model, data=data_small, af_options=af_opts) + fit_large = estimate_af(model_spec=model, data=data_large, af_options=af_opts) + + inf_small = compute_af_standard_errors( + fit_small, data_small, af_opts, n_boot=2000, seed=1 + ) + inf_large = compute_af_standard_errors( + fit_large, 
data_large, af_opts, n_boot=2000, seed=1 + ) + + loc = ("loadings", 0, "m2", "skill") + se_small = float(inf_small.standard_errors.loc[loc]) + se_large = float(inf_large.standard_errors.loc[loc]) + + # Sample size quadrupled: expect SE ~ halved. Tolerate a wide band + # because the bootstrap is noisy on moderate samples. + ratio = se_large / se_small + assert 0.25 < ratio < 0.8, ( + f"Expected SE ratio in (0.25, 0.8) under 4x sample-size bump; " + f"got {ratio:.3f} (se_small={se_small}, se_large={se_large})" + ) diff --git a/tests/test_af_initialization.py b/tests/test_af_initialization.py new file mode 100644 index 00000000..3a890909 --- /dev/null +++ b/tests/test_af_initialization.py @@ -0,0 +1,106 @@ +"""Tests for AF initialization strategies.""" + +import numpy as np +import pytest + +from skillmodels.af.types import AFEstimationOptions +from skillmodels.amn.moments import spearman_factor_moments + + +def test_default_initialization_strategy_is_amn(): + """Default initialization runs the full AMN estimator upfront.""" + opts = AFEstimationOptions() + + assert opts.initialization_strategy == "amn" + + +def test_initialization_strategy_can_be_set_to_spearman(): + """Legacy Spearman pre-pass is available under the `"spearman"` name.""" + opts = AFEstimationOptions(initialization_strategy="spearman") + + assert opts.initialization_strategy == "spearman" + + +def test_initialization_strategy_can_be_set_to_constant(): + """Legacy constant init remains available for regression testing.""" + opts = AFEstimationOptions( + initialization_strategy="constant", + ) + + assert opts.initialization_strategy == "constant" + + +def test_spearman_seed_closer_to_truth_than_constant_default(): + """Moment-based seed is closer to truth than the static 0.5 default. + + Synthetic data with known sigma_meas and Var(latent) — assert that the + Spearman residual variance gives a starting sigma_meas closer to truth + than the legacy ``obs_sd * 0.5`` heuristic. + """ + rng = np.random.default_rng(0) + n = 1000 + truth_loadings = np.array([1.0, 1.2, 0.9]) + truth_meas_sds = np.array([0.3, 0.4, 0.3]) + truth_factor_sd = 1.5 + factor = rng.normal(0.0, truth_factor_sd, size=n) + eps = rng.normal(0.0, 1.0, size=(n, 3)) * truth_meas_sds + measurements = truth_loadings * factor[:, None] + eps + + spearman = spearman_factor_moments(measurements, anchor_idx=0) + + # Spearman recovers sigma_meas within 30% of truth. + for k in range(3): + assert spearman.meas_sds[k] == pytest.approx(truth_meas_sds[k], rel=0.30) + + # Legacy default is obs_sd * 0.5; for sigma_meas truth=0.3 with anchor + # variance λ²·Var(F)+sigma_meas² ≈ 1²·2.25+0.09 ≈ 2.34, obs_sd ≈ 1.53, + # default seed ≈ 0.76 — way off truth 0.3. Spearman should be closer. + obs_sds = np.nanstd(measurements, axis=0) + legacy_seeds = np.maximum(obs_sds * 0.5, 0.01) + spearman_dist = np.abs(spearman.meas_sds - truth_meas_sds).sum() + legacy_dist = np.abs(legacy_seeds - truth_meas_sds).sum() + assert spearman_dist < legacy_dist + + +def test_spearman_falls_back_for_single_measurement_factor(): + """`valid=False` → moment-init returns the same fallback values.""" + measurements = np.random.default_rng(0).normal(size=(100, 1)) + + result = spearman_factor_moments(measurements, anchor_idx=0) + + assert not result.valid + # Fallback values are constant; downstream code should keep using + # the static defaults instead of overriding from these. 
+ assert result.loadings.shape == (1,) + assert result.meas_sds.shape == (1,) + + +def test_initialization_strategy_other_options_unchanged(): + """Other AFEstimationOptions fields remain at their existing defaults.""" + opts = AFEstimationOptions() + + assert opts.n_halton_points == 50 + assert opts.n_halton_points_shock == 30 + assert opts.n_mixture_components == 2 + assert opts.optimizer_algorithm == "fides" + assert opts.two_stage is False + assert opts.coarse_fraction == 0.5 + assert opts.stability_floor == 1e-217 + assert opts.n_obs_per_batch is None + + +def test_moment_init_handles_pinned_anchor_loading(): + """When user pins loading to a non-1.0 value, anchor_loading respects it.""" + rng = np.random.default_rng(0) + n = 800 + loadings = np.array([2.0, 0.6, 1.2]) # anchor=2.0 (user normalization) + factor = rng.normal(0.0, 1.0, size=n) + eps = rng.normal(0.0, 0.4, size=(n, 3)) + measurements = loadings * factor[:, None] + eps + + result = spearman_factor_moments(measurements, anchor_idx=0, anchor_loading=2.0) + + assert result.loadings[0] == pytest.approx(2.0, abs=1e-12) + # Other loadings should be on the same scale. + assert result.loadings[1] == pytest.approx(0.6, rel=0.30) + assert result.loadings[2] == pytest.approx(1.2, rel=0.30) diff --git a/tests/test_af_t5_extension.py b/tests/test_af_t5_extension.py new file mode 100644 index 00000000..9a9158e5 --- /dev/null +++ b/tests/test_af_t5_extension.py @@ -0,0 +1,173 @@ +"""End-to-end test that AF works for T = 5 periods. + +The AF paper's iterative chain (Section 3) is described for general T, +but skillmodels' AF tests so far cover T = 3. This test runs the full +chain on a synthetic T=5 panel and confirms `estimate_af` produces +five per-period results with finite likelihoods and the expected +chain-link structure (k links after estimating period k). + +Marked `end_to_end` so it does not run in the default test suite. 
+""" + +import jax +import numpy as np +import pandas as pd +import pytest + +from skillmodels.af import AFEstimationOptions, estimate_af +from skillmodels.common.model_spec import ( + EstimationOptions, + FactorSpec, + ModelSpec, + Normalizations, +) +from skillmodels.common.params_index import get_params_index +from skillmodels.common.process_model import process_model + +jax.config.update("jax_enable_x64", True) + + +def _build_t5_model() -> ModelSpec: + """Two-factor T=5 model: linear `state`, linear `inv`, three measures each.""" + return ModelSpec( + factors={ + "state": FactorSpec( + measurements=(("y1", "y2", "y3"),) * 5, + normalizations=Normalizations( + loadings=({"y1": 1},) * 5, + intercepts=({"y1": 0},) * 5, + ), + transition_function="linear", + ), + "inv": FactorSpec( + measurements=(("z1", "z2", "z3"),) * 5, + normalizations=Normalizations( + loadings=({"z1": 1},) * 5, + intercepts=({"z1": 0},) * 5, + ), + transition_function="linear", + ), + }, + estimation_options=EstimationOptions( + robust_bounds=True, + bounds_distance=0.001, + n_mixtures=1, + ), + ) + + +def _truth_params_t5(model: ModelSpec) -> pd.DataFrame: + """Build a truth params DataFrame for the T=5 model from the params index.""" + processed = process_model(model) + p_index = get_params_index( + update_info=processed.update_info, + labels=processed.labels, + dimensions=processed.dimensions, + transition_info=processed.transition_info, + endogenous_factors_info=processed.endogenous_factors_info, + ) + df = pd.DataFrame({"value": np.zeros(len(p_index))}, index=p_index) + cat = df.index.get_level_values("category") + df.loc[cat == "loadings", "value"] = 1.0 + df.loc[cat == "meas_sds", "value"] = 0.3 + df.loc[cat == "shock_sds", "value"] = 0.4 + df.loc[cat == "mixture_weights", "value"] = 1.0 + for aug_period in range(4): + for factor, other in (("state", "inv"), ("inv", "state")): + for regressor, val in ( + (factor, 0.7), + (other, 0.2), + ("constant", 0.1), + ): + loc = ("transition", aug_period, factor, regressor) + if loc in df.index: + df.loc[loc, "value"] = val + cholcov_diag_mask = pd.Series( + [ + idx[0] == "initial_cholcovs" + and "-" in idx[3] + and idx[3].split("-")[0] == idx[3].split("-")[1] + for idx in df.index + ], + index=df.index, + ) + df.loc[cholcov_diag_mask, "value"] = 1.0 + return df + + +def _simulate_synthetic_t5( + model: ModelSpec, + params: pd.DataFrame, + n_obs: int, + seed: int, +) -> pd.DataFrame: + """Simulate (states + measurements) directly for the T=5 model.""" + n_periods = 5 + rng = np.random.default_rng(seed) + state = rng.normal(0.0, 1.0, size=(n_obs, 2)) # (state_t, inv_t) + state_history = [state.copy()] + + def _val(loc: tuple) -> float: + return float(params.loc[loc, "value"]) + + for t in range(1, n_periods): + prev = state_history[-1] + new_state = np.zeros_like(prev) + for f, idx in (("state", 0), ("inv", 1)): + other_idx = 1 - idx + other = "inv" if f == "state" else "state" + a = _val(("transition", t - 1, f, f)) + b = _val(("transition", t - 1, f, other)) + c = _val(("transition", t - 1, f, "constant")) + sigma = _val(("shock_sds", t - 1, f, "-")) + new_state[:, idx] = ( + a * prev[:, idx] + + b * prev[:, other_idx] + + c + + sigma * rng.normal(size=n_obs) + ) + state_history.append(new_state) + + records: list[dict] = [] + for obs_id in range(n_obs): + for t in range(n_periods): + row: dict[str, float | int] = {"caseid": obs_id, "period": t} + st = state_history[t][obs_id] + for f, idx in (("state", 0), ("inv", 1)): + meas_prefix = "y" if f == "state" else "z" + 
for k in (1, 2, 3): + meas_name = f"{meas_prefix}{k}" + lam = _val(("loadings", t, meas_name, f)) + sigma_eps = _val(("meas_sds", t, meas_name, "-")) + row[meas_name] = float(lam * st[idx] + sigma_eps * rng.normal()) + records.append(row) + return pd.DataFrame.from_records(records).set_index(["caseid", "period"]) + + +@pytest.mark.end_to_end +def test_af_chain_runs_for_t5() -> None: + """`estimate_af` runs the full T=5 chain and produces finite per-period llik.""" + model = _build_t5_model() + params = _truth_params_t5(model) + data = _simulate_synthetic_t5(model, params, n_obs=200, seed=20260510) + + af_options = AFEstimationOptions( + n_halton_points=20, + n_halton_points_shock=10, + n_mixture_components=1, + optimizer_algorithm="scipy_lbfgsb", + ) + + result = estimate_af(model_spec=model, data=data, af_options=af_options) + + assert len(result.period_results) == 5, ( + f"Expected 5 per-period results for T=5; got {len(result.period_results)}" + ) + for pr in result.period_results: + assert np.isfinite(pr.loglikelihood), ( + f"period {pr.period}: non-finite loglikelihood {pr.loglikelihood}" + ) + assert len(result.conditional_distributions) == 5 + # Each period after 0 carries one chain link per prior transition. + for t, cd in enumerate(result.conditional_distributions): + assert len(cd.chain_links) == max(t, 0) diff --git a/tests/test_amn_estimate.py b/tests/test_amn_estimate.py new file mode 100644 index 00000000..4a0bf58e --- /dev/null +++ b/tests/test_amn_estimate.py @@ -0,0 +1,110 @@ +"""Tests for `skillmodels.amn.estimate.estimate_amn` (end-to-end orchestration).""" + +import numpy as np +import pandas as pd +import pytest + +from skillmodels import estimate_amn +from skillmodels.amn.types import AMNEstimationOptions +from skillmodels.common.model_spec import ( + EstimationOptions, + FactorSpec, + ModelSpec, + Normalizations, +) + + +def _tiny_model() -> ModelSpec: + return ModelSpec( + factors={ + "skills": FactorSpec( + measurements=(("y1", "y2", "y3"), ("y1", "y2", "y3")), + normalizations=Normalizations( + loadings=({"y1": 1}, {"y1": 1}), + intercepts=({"y1": 0}, {}), + ), + transition_function="linear", + ), + }, + estimation_options=EstimationOptions( + robust_bounds=True, bounds_distance=0.001, n_mixtures=1 + ), + ) + + +def _tiny_data(n: int = 1500, seed: int = 0) -> pd.DataFrame: + rng = np.random.default_rng(seed) + rows = [] + for caseid in range(n): + f0 = rng.normal() + f1 = 0.6 * f0 + rng.normal(0, 0.5) + for period, f in [(0, f0), (1, f1)]: + rows.append( + { + "caseid": caseid, + "period": period, + "y1": f + rng.normal(0, 0.3), + "y2": 0.9 * f + rng.normal(0, 0.4), + "y3": 1.1 * f + rng.normal(0, 0.5), + } + ) + return pd.DataFrame(rows).set_index(["caseid", "period"]) + + +def test_estimate_amn_produces_combined_params_dataframe(): + model = _tiny_model() + data = _tiny_data(n=1500) + options = AMNEstimationOptions( + n_mixture_components=2, n_simulation_draws=5000, seed=0 + ) + + result = estimate_amn(model, data, options) + + assert result.all_params.index.names == [ + "category", + "aug_period", + "name1", + "name2", + ] + cats = set(result.all_params.index.get_level_values("category")) + assert {"loadings", "meas_sds", "transition", "shock_sds"} <= cats + # 6 measurement loadings, 6 meas_sds, 1 transition (slope on skills) + + # constant for period 0, 1 shock_sds for period 0. 
+ assert "controls" in cats # measurement intercepts collapse to controls + + +def test_estimate_amn_honors_fixed_params(): + model = _tiny_model() + data = _tiny_data(n=1500) + options = AMNEstimationOptions( + n_mixture_components=2, n_simulation_draws=5000, seed=0 + ) + + pin_loc = ("loadings", 1, "y2", "skills") + fixed = pd.DataFrame( + {"value": [0.42]}, + index=pd.MultiIndex.from_tuples( + [pin_loc], names=["category", "aug_period", "name1", "name2"] + ), + ) + + result = estimate_amn(model, data, options, fixed_params=fixed) + + assert result.all_params.loc[pin_loc, "value"] == pytest.approx(0.42) + + +def test_estimate_amn_returns_success_flag(): + model = _tiny_model() + data = _tiny_data(n=1500) + options = AMNEstimationOptions( + n_mixture_components=2, n_simulation_draws=2000, seed=1 + ) + + result = estimate_amn(model, data, options) + + assert isinstance(result.success, bool) + assert result.stages.mixture.weights.shape == (2,) + assert result.stages.structural.factor_period_slots == ( + (0, "skills"), + (1, "skills"), + ) diff --git a/tests/test_amn_inference.py b/tests/test_amn_inference.py new file mode 100644 index 00000000..bcacd4fd --- /dev/null +++ b/tests/test_amn_inference.py @@ -0,0 +1,87 @@ +"""Tests for `skillmodels.amn.inference.compute_amn_standard_errors`.""" + +import numpy as np +import pandas as pd + +from skillmodels import compute_amn_standard_errors, estimate_amn +from skillmodels.amn.types import AMNEstimationOptions +from skillmodels.common.model_spec import ( + EstimationOptions, + FactorSpec, + ModelSpec, + Normalizations, +) + + +def _tiny_model() -> ModelSpec: + return ModelSpec( + factors={ + "skills": FactorSpec( + measurements=(("y1", "y2", "y3"), ("y1", "y2", "y3")), + normalizations=Normalizations( + loadings=({"y1": 1}, {"y1": 1}), + intercepts=({"y1": 0}, {}), + ), + transition_function="linear", + ), + }, + estimation_options=EstimationOptions( + robust_bounds=True, bounds_distance=0.001, n_mixtures=1 + ), + ) + + +def _tiny_data(n: int = 800, seed: int = 0) -> pd.DataFrame: + rng = np.random.default_rng(seed) + rows = [] + for caseid in range(n): + f0 = rng.normal() + f1 = 0.6 * f0 + rng.normal(0, 0.5) + for period, f in [(0, f0), (1, f1)]: + rows.append( + { + "caseid": caseid, + "period": period, + "y1": f + rng.normal(0, 0.3), + "y2": 0.9 * f + rng.normal(0, 0.4), + "y3": 1.1 * f + rng.normal(0, 0.5), + } + ) + return pd.DataFrame(rows).set_index(["caseid", "period"]) + + +def test_bootstrap_returns_expected_shapes(): + model = _tiny_model() + data = _tiny_data(n=500, seed=0) + options = AMNEstimationOptions( + n_mixture_components=2, n_simulation_draws=1000, seed=0 + ) + fit = estimate_amn(model, data, options) + + inference = compute_amn_standard_errors(fit, data, options, n_boot=5, seed=11) + + assert inference.n_boot == 5 + assert inference.n_clusters == 500 + assert inference.standard_errors.shape[0] == fit.all_params.shape[0] + assert inference.replicate_params.shape == (5, fit.all_params.shape[0]) + assert inference.vcov.shape == (fit.all_params.shape[0], fit.all_params.shape[0]) + + +def test_bootstrap_standard_errors_non_negative_and_finite_where_replicates_finite(): + model = _tiny_model() + data = _tiny_data(n=500, seed=1) + options = AMNEstimationOptions( + n_mixture_components=2, n_simulation_draws=1000, seed=0 + ) + fit = estimate_amn(model, data, options) + + inference = compute_amn_standard_errors(fit, data, options, n_boot=8, seed=42) + + # Wherever we have at least two finite replicates for a parameter, + # the std 
should be finite and non-negative. + for col in inference.standard_errors.index: + finite = inference.replicate_params[col].dropna() + if len(finite) >= 2: + se = inference.standard_errors[col] + assert np.isfinite(se) + assert se >= 0.0 diff --git a/tests/test_amn_minimum_distance.py b/tests/test_amn_minimum_distance.py new file mode 100644 index 00000000..148e2080 --- /dev/null +++ b/tests/test_amn_minimum_distance.py @@ -0,0 +1,221 @@ +"""Tests for `skillmodels.amn.minimum_distance` (AMN Stage 2).""" + +import numpy as np +import pandas as pd +import pytest + +from skillmodels.amn.minimum_distance import ( + _build_structure, + _pack_layout, + solve_minimum_distance, +) +from skillmodels.amn.mixture_em import ( + build_augmented_measure_layout, + build_augmented_measure_matrix, + fit_mixture_em, +) +from skillmodels.amn.types import ( + AugmentedMeasureLayout, + MixtureFitResult, +) +from skillmodels.common.model_spec import ( + EstimationOptions, + FactorSpec, + ModelSpec, + Normalizations, +) +from skillmodels.common.process_model import process_model + + +def _tiny_model() -> ModelSpec: + return ModelSpec( + factors={ + "skills": FactorSpec( + measurements=(("y1", "y2", "y3"), ("y1", "y2", "y3")), + normalizations=Normalizations( + loadings=({"y1": 1}, {"y1": 1}), + intercepts=({"y1": 0}, {}), + ), + transition_function="linear", + ), + }, + estimation_options=EstimationOptions( + robust_bounds=True, bounds_distance=0.001, n_mixtures=1 + ), + ) + + +def _build_oracle_mixture( + *, + n_components: int = 2, + n_aug: int = 6, + seed: int = 0, + layout: AugmentedMeasureLayout | None = None, +) -> tuple[MixtureFitResult, dict[str, np.ndarray]]: + """Build a synthetic MixtureFitResult with known structural moments. + + Layout: 2 periods x 3 measurements on a single latent factor, anchor + measurement loading=1, others = (1.0, 0.8, 1.2). Mean-zero on the + period-0 factor. + """ + del seed + if layout is None: + layout = AugmentedMeasureLayout( + columns=tuple( + f"y[{t}|skills|{m}]" for t in (0, 1) for m in ("y1", "y2", "y3") + ), + measurement_slots=tuple(range(n_aug)), + observed_factor_slots=(), + control_slots=(), + measurement_meta=tuple( + (t, "skills", m) for t in (0, 1) for m in ("y1", "y2", "y3") + ), + observed_factor_meta=(), + control_meta=(), + ) + + truth_lambda = np.zeros((6, 2)) + truth_lambda[0, 0] = 1.0 + truth_lambda[1, 0] = 0.8 + truth_lambda[2, 0] = 1.2 + truth_lambda[3, 1] = 1.0 + truth_lambda[4, 1] = 0.8 + truth_lambda[5, 1] = 1.2 + truth_intercept = np.array([0.0, 0.1, -0.2, 0.5, 0.3, 0.4]) + truth_sigma2 = np.array([0.3, 0.25, 0.4, 0.35, 0.2, 0.5]) ** 2 + + truth_mu = np.array([[-0.6, 0.4], [0.4, -0.3]]) # period-0 enforces sum-to-zero + # Enforce sum-to-zero on column 0 (period-0 latent slot) with + # weights 0.5/0.5. 
+ truth_mu[1, 0] = -truth_mu[0, 0] + truth_omega = np.array( + [ + [[1.0, 0.4], [0.4, 1.2]], + [[0.9, 0.2], [0.2, 1.1]], + ] + ) + + means = np.empty((n_components, n_aug)) + covs = np.empty((n_components, n_aug, n_aug)) + for m in range(n_components): + means[m] = truth_intercept + truth_lambda @ truth_mu[m] + covs[m] = truth_lambda @ truth_omega[m] @ truth_lambda.T + np.diag(truth_sigma2) + + weights = np.array([0.5, 0.5]) + + return MixtureFitResult( + weights=weights, + means=means, + covariances=covs, + loglikelihood=-100.0, + n_iter=10, + converged=True, + layout=layout, + ), { + "lambda": truth_lambda, + "intercept": truth_intercept, + "sigma2": truth_sigma2, + "mu": truth_mu, + "omega": truth_omega, + } + + +def test_build_structure_identifies_anchor_and_baseline(): + model = _tiny_model() + processed = process_model(model) + layout = build_augmented_measure_layout(processed) + + struct = _build_structure(layout, processed) + + # 2 latent-factor-period slots: (0, skills) and (1, skills). + assert len(struct.factor_period_slots) == 2 + assert (0, "skills") in struct.factor_period_slots + assert (1, "skills") in struct.factor_period_slots + # 6 measurement slots; 2 of them (y1 at periods 0,1) have + # normalized loading=1, so lambda has 4 free entries. + assert struct.lambda_free_mask.sum() == 4 + # y1 at period 0 has normalized intercept=0; the other 5 are free. + assert struct.intercept_free_mask.sum() == 5 + # All 6 measurement slots have free sigma2 (no obs factors, no controls). + assert struct.sigma2_free_mask.sum() == 6 + # Baseline mean-zero slot is (0, "skills"). + baseline_slot = struct.factor_period_slots.index((0, "skills")) + assert baseline_slot in struct.baseline_mean_zero_slots + + +def test_pack_layout_returns_consistent_total(): + model = _tiny_model() + processed = process_model(model) + layout = build_augmented_measure_layout(processed) + struct = _build_structure(layout, processed) + + n_total, slices = _pack_layout(struct, n_components=2) + + # sigma2: 6 free; chol_0+chol_1: 2*3=6; mu: 2*2 - 1 baseline = 3; + # lambda: 4 free; intercept: 5 free => 6+6+3+4+5 = 24. + assert n_total == 24 + assert slices["sigma2"] == slice(0, 6) + + +def test_solve_minimum_distance_recovers_oracle(): + model = _tiny_model() + processed = process_model(model) + layout = build_augmented_measure_layout(processed) + mixture, _truth = _build_oracle_mixture(layout=layout) + + result = solve_minimum_distance(mixture, processed) + + # The minimum-distance criterion should be near zero on oracle moments. + assert result.objective_value < 1e-3 + + # Loadings should match truth within tolerance. 
+ loadings = result.loadings.reset_index().set_index(["period", "measurement"]) + assert loadings.loc[(0, "y1"), "loading"] == pytest.approx(1.0, abs=1e-6) + assert loadings.loc[(0, "y2"), "loading"] == pytest.approx(0.8, abs=5e-2) + assert loadings.loc[(0, "y3"), "loading"] == pytest.approx(1.2, abs=5e-2) + + +def test_solve_minimum_distance_rejects_unknown_weighting(): + model = _tiny_model() + processed = process_model(model) + layout = build_augmented_measure_layout(processed) + mixture, _ = _build_oracle_mixture(layout=layout) + + with pytest.raises(ValueError, match="Unknown weighting"): + solve_minimum_distance(mixture, processed, weighting="bogus") + + +def test_solve_minimum_distance_runs_on_fitted_mixture(): + """End-to-end: simulate 1-component data, fit, then recover Lambda.""" + model = _tiny_model() + processed = process_model(model) + layout = build_augmented_measure_layout(processed) + + rng = np.random.default_rng(0) + n = 1500 + # period-0 factor mean-zero (sum-to-zero with itself => 0). + period0 = rng.normal(0.0, 1.0, size=n) + period1 = 0.7 * period0 + rng.normal(0.0, 0.6, size=n) + + rows = [] + for caseid in range(n): + for period, f in [(0, period0[caseid]), (1, period1[caseid])]: + rows.append( + { + "caseid": caseid, + "period": period, + "y1": f + rng.normal(0, 0.3), + "y2": 0.8 * f + rng.normal(0, 0.4), + "y3": 1.2 * f + rng.normal(0, 0.35), + } + ) + data = pd.DataFrame(rows).set_index(["caseid", "period"]) + augmented = build_augmented_measure_matrix(data, processed, layout) + + mixture = fit_mixture_em(augmented, n_components=2, n_init=2, seed=0, layout=layout) + + result = solve_minimum_distance(mixture, processed) + + # Just verifying it runs and produces a finite objective. + assert np.isfinite(result.objective_value) + assert result.loadings.shape[0] == 6 diff --git a/tests/test_amn_mixture_em.py b/tests/test_amn_mixture_em.py new file mode 100644 index 00000000..c88359d1 --- /dev/null +++ b/tests/test_amn_mixture_em.py @@ -0,0 +1,238 @@ +"""Tests for `skillmodels.amn.mixture_em` (AMN Stage 1).""" + +import numpy as np +import pandas as pd +import pytest + +from skillmodels.amn.mixture_em import ( + build_augmented_measure_layout, + build_augmented_measure_matrix, + fit_mixture_em, +) +from skillmodels.common.model_spec import ( + EstimationOptions, + FactorSpec, + ModelSpec, + Normalizations, +) +from skillmodels.common.process_model import process_model + + +def _tiny_model() -> ModelSpec: + """Return a 2-period, 1-latent-factor model with 3 indicators per period.""" + return ModelSpec( + factors={ + "skills": FactorSpec( + measurements=(("y1", "y2", "y3"), ("y1", "y2", "y3")), + normalizations=Normalizations( + loadings=({"y1": 1}, {"y1": 1}), + intercepts=({"y1": 0}, {}), + ), + transition_function="linear", + ), + }, + estimation_options=EstimationOptions( + robust_bounds=True, bounds_distance=0.001, n_mixtures=1 + ), + ) + + +def _tiny_long_data(n: int = 200, seed: int = 0) -> pd.DataFrame: + """Two periods, three measurements each, drawn from N(0, 1) + noise.""" + rng = np.random.default_rng(seed) + rows = [] + for caseid in range(n): + factor_0 = rng.normal() + factor_1 = 0.6 * factor_0 + rng.normal(0, 0.5) + for period, f in [(0, factor_0), (1, factor_1)]: + rows.append( + { + "caseid": caseid, + "period": period, + "y1": f + rng.normal(0, 0.3), + "y2": 0.9 * f + rng.normal(0, 0.4), + "y3": 1.1 * f + rng.normal(0, 0.5), + } + ) + return pd.DataFrame(rows).set_index(["caseid", "period"]) + + +def test_layout_has_one_slot_per_measurement_update(): 
+ model = _tiny_model() + processed = process_model(model) + + layout = build_augmented_measure_layout(processed) + + # 2 periods x 3 measurements = 6 measurement slots, no observed factors + # or controls. + assert len(layout.measurement_slots) == 6 + assert layout.observed_factor_slots == () + assert layout.control_slots == () + assert len(layout.columns) == 6 + + +def test_layout_records_period_factor_and_measurement_names(): + model = _tiny_model() + processed = process_model(model) + + layout = build_augmented_measure_layout(processed) + + assert set(layout.measurement_meta) == { + (0, "skills", "y1"), + (0, "skills", "y2"), + (0, "skills", "y3"), + (1, "skills", "y1"), + (1, "skills", "y2"), + (1, "skills", "y3"), + } + + +def test_layout_skips_anchoring_rows(): + """Anchoring outcomes (purpose != measurement) must not become slots.""" + base = _tiny_model() + from skillmodels.common.model_spec import AnchoringSpec # noqa: PLC0415 + + anchored = base.with_anchoring( + AnchoringSpec( + outcomes={"skills": "outcome"}, + free_controls=False, + free_constant=False, + free_loadings=True, + ignore_constant_when_anchoring=True, + ) + ) + processed = process_model(anchored) + + layout = build_augmented_measure_layout(processed) + + # 6 measurement slots; anchoring update rows are filtered out. + for _, factor, _ in layout.measurement_meta: + assert factor == "skills" + assert len(layout.measurement_slots) == 6 + + +def test_matrix_fills_each_slot_from_the_right_period(): + model = _tiny_model() + processed = process_model(model) + data = _tiny_long_data(n=50, seed=1) + + layout = build_augmented_measure_layout(processed) + matrix = build_augmented_measure_matrix(data, processed, layout) + + assert matrix.shape == (50, 6) + + # Period 0 slot for y1 must equal data.loc[(*, 0), "y1"]. + period0_y1_slot = next( + slot + for slot, meta in zip( + layout.measurement_slots, layout.measurement_meta, strict=True + ) + if meta == (0, "skills", "y1") + ) + expected = data.xs(0, level="period")["y1"].to_numpy() + np.testing.assert_allclose(matrix[:, period0_y1_slot], expected) + + +def test_matrix_marks_missing_caseids_as_nan(): + model = _tiny_model() + processed = process_model(model) + layout = build_augmented_measure_layout(processed) + data = _tiny_long_data(n=10, seed=2) + # Drop period 1 for caseid 0 entirely. + data = data.drop(index=(0, 1)) + + matrix = build_augmented_measure_matrix(data, processed, layout) + + # The first row corresponds to caseid 0; period-1 slots should be NaN. + period1_slots = [ + slot + for slot, (period, _, _) in zip( + layout.measurement_slots, layout.measurement_meta, strict=True + ) + if period == 1 + ] + assert np.all(np.isnan(matrix[0, period1_slots])) + # Period-0 slots for the same caseid stay finite. 
+ period0_slots = [ + slot + for slot, (period, _, _) in zip( + layout.measurement_slots, layout.measurement_meta, strict=True + ) + if period == 0 + ] + assert np.all(np.isfinite(matrix[0, period0_slots])) + + +def _simulate_two_component_panel( + *, + n: int, + weights: tuple[float, float], + means: tuple[np.ndarray, np.ndarray], + chols: tuple[np.ndarray, np.ndarray], + seed: int, +) -> np.ndarray: + rng = np.random.default_rng(seed) + labels = rng.choice([0, 1], size=n, p=list(weights)) + samples = np.empty((n, means[0].shape[0])) + for k in (0, 1): + idx = labels == k + if idx.any(): + standard = rng.normal(size=(idx.sum(), means[k].shape[0])) + samples[idx] = standard @ chols[k].T + means[k] + return samples + + +def test_fit_mixture_em_recovers_two_components_within_tolerance(): + truth_weights = (0.4, 0.6) + truth_means = (np.array([-1.5, 1.0]), np.array([1.5, -1.0])) + truth_chols = ( + np.linalg.cholesky(np.array([[1.0, 0.3], [0.3, 1.2]])), + np.linalg.cholesky(np.array([[0.8, -0.2], [-0.2, 1.0]])), + ) + augmented = _simulate_two_component_panel( + n=4000, + weights=truth_weights, + means=truth_means, + chols=truth_chols, + seed=11, + ) + + result = fit_mixture_em(augmented, n_components=2, n_init=3, seed=11) + + assert result.converged + # Order of components is arbitrary; line them up to the truth by + # nearest-mean. + order = np.argsort(result.means[:, 0]) + truth_order = np.argsort([truth_means[0][0], truth_means[1][0]]) + + np.testing.assert_allclose( + result.weights[order], + np.array(truth_weights)[truth_order], + atol=0.05, + ) + for fitted_k, truth_k in zip(order, truth_order, strict=True): + np.testing.assert_allclose( + result.means[fitted_k], + truth_means[truth_k], + atol=0.15, + ) + + +def test_fit_mixture_em_drops_incomplete_rows(): + rng = np.random.default_rng(3) + augmented = rng.normal(size=(200, 4)) + augmented[:50, 2] = np.nan # 50 rows incomplete + + result = fit_mixture_em(augmented, n_components=2, n_init=2, seed=3) + + # n_complete = 150; loglikelihood should be ~150 * per-row mean. + # The check we actually want is that it runs without error and the + # iteration count is sensible. 
+ assert result.n_iter >= 1 + assert result.weights.shape == (2,) + + +def test_fit_mixture_em_raises_when_too_few_complete_rows(): + augmented = np.array([[np.nan, 1.0], [1.0, 2.0]]) + with pytest.raises(ValueError, match="complete-case"): + fit_mixture_em(augmented, n_components=3, n_init=1, seed=0) diff --git a/tests/test_amn_moments.py b/tests/test_amn_moments.py new file mode 100644 index 00000000..06f971c1 --- /dev/null +++ b/tests/test_amn_moments.py @@ -0,0 +1,213 @@ +"""Unit tests for `skillmodels.amn.moments` Spearman estimators.""" + +import numpy as np +import pytest + +from skillmodels.amn.moments import ( + SpearmanResult, + derive_unexplained_sd, + seed_beta_from_ols, + spearman_factor_moments, +) + + +def _simulate_three_indicators( + *, + n: int, + loadings: np.ndarray, + meas_sds: np.ndarray, + factor_var: float, + seed: int = 0, +) -> np.ndarray: + rng = np.random.default_rng(seed) + factor = rng.normal(0.0, np.sqrt(factor_var), size=n) + eps = rng.normal(0.0, 1.0, size=(n, len(loadings))) * meas_sds + return loadings * factor[:, None] + eps + + +def test_spearman_recovers_loadings_within_30pct(): + truth_loadings = np.array([1.0, 1.3, 0.8]) + truth_meas_sds = np.array([0.4, 0.5, 0.3]) + truth_factor_var = 1.5 + measurements = _simulate_three_indicators( + n=2000, + loadings=truth_loadings, + meas_sds=truth_meas_sds, + factor_var=truth_factor_var, + seed=42, + ) + + result = spearman_factor_moments(measurements, anchor_idx=0) + + assert result.valid + assert result.loadings[0] == pytest.approx(1.0, abs=1e-12) + assert result.loadings[1] == pytest.approx(truth_loadings[1], rel=0.30) + assert result.loadings[2] == pytest.approx(truth_loadings[2], rel=0.30) + assert result.latent_var == pytest.approx(truth_factor_var, rel=0.30) + for k in range(3): + assert result.meas_sds[k] == pytest.approx(truth_meas_sds[k], rel=0.30) + + +def test_spearman_anchor_fallback_on_zero_cov(): + rng = np.random.default_rng(0) + n = 1500 + factor = rng.normal(0.0, 1.0, size=n) + # First measurement is independent noise; the next two share the factor. + indep = rng.normal(0.0, 1.0, size=n) + measurements = np.column_stack( + [ + indep, + 1.2 * factor + 0.4 * rng.normal(size=n), + 0.9 * factor + 0.3 * rng.normal(size=n), + ] + ) + + result = spearman_factor_moments(measurements, anchor_idx=0) + + # Anchor candidate 0 is uncorrelated with the others — but the routine + # rotates to a different anchor and still returns a valid result, with + # the user-requested anchor (idx 0) reported on a 1.0 loading scale. + assert result.valid + assert result.loadings[0] == pytest.approx(1.0, abs=1e-12) + # The loading on idx 0 is on a degenerate scale; what matters is that + # the routine didn't NaN out and returned finite values everywhere. + assert np.all(np.isfinite(result.loadings)) + assert np.all(np.isfinite(result.meas_sds)) + assert np.isfinite(result.latent_var) + + +def test_spearman_handles_negative_residual_variance(): + # Tiny n forces sample noise where S_kk < λ_k² Var(F) is possible. 
+ truth_loadings = np.array([1.0, 0.9, 1.1]) + truth_meas_sds = np.array([0.05, 0.05, 0.05]) + measurements = _simulate_three_indicators( + n=20, + loadings=truth_loadings, + meas_sds=truth_meas_sds, + factor_var=1.0, + seed=7, + ) + + result = spearman_factor_moments(measurements, sd_floor=1e-3) + + assert np.all(np.isfinite(result.meas_sds)) + assert np.all(result.meas_sds >= 1e-3 - 1e-12) + assert np.isfinite(result.latent_var) + + +def test_spearman_below_two_measurements_returns_invalid(): + measurements = np.random.default_rng(0).normal(size=(100, 1)) + + result = spearman_factor_moments(measurements) + + assert not result.valid + assert result.loadings.shape == (1,) + + +def test_spearman_pairwise_complete_handles_nan(): + truth_loadings = np.array([1.0, 1.2, 0.8]) + truth_meas_sds = np.array([0.3, 0.3, 0.3]) + truth_factor_var = 1.0 + measurements = _simulate_three_indicators( + n=3000, + loadings=truth_loadings, + meas_sds=truth_meas_sds, + factor_var=truth_factor_var, + seed=1, + ) + # Punch a few NaNs into different columns so listwise-complete would + # discard most rows. + rng = np.random.default_rng(2) + for col in range(3): + idx = rng.choice(3000, size=400, replace=False) + measurements[idx, col] = np.nan + + result = spearman_factor_moments(measurements) + + assert result.valid + assert result.loadings[1] == pytest.approx(truth_loadings[1], rel=0.30) + assert result.loadings[2] == pytest.approx(truth_loadings[2], rel=0.30) + + +def test_derive_unexplained_sd_clamped(): + # β'Σβ > latent_var → clamped to floor, not NaN. + sd = derive_unexplained_sd( + latent_var=0.5, + beta=np.array([2.0]), + prev_state_cov=np.array([[1.0]]), + sd_floor=1e-3, + ) + + assert sd == pytest.approx(1e-3, abs=1e-12) + + +def test_derive_unexplained_sd_recovers_residual(): + # latent_var = 1.0, β'Σβ = 0.36 → residual var = 0.64 → sd = 0.8. 
+ sd = derive_unexplained_sd( + latent_var=1.0, + beta=np.array([0.6]), + prev_state_cov=np.array([[1.0]]), + ) + + assert sd == pytest.approx(0.8, rel=1e-9) + + +def test_derive_unexplained_sd_handles_multivariate_state(): + beta = np.array([0.3, 0.4]) + cov = np.array([[1.0, 0.2], [0.2, 1.0]]) + # β'Σβ = 0.09 + 2*0.3*0.4*0.2 + 0.16 = 0.298 + expected = float(np.sqrt(1.0 - 0.298)) + + sd = derive_unexplained_sd(latent_var=1.0, beta=beta, prev_state_cov=cov) + + assert sd == pytest.approx(expected, rel=1e-9) + + +def test_seed_beta_from_ols_recovers_known_coefs(): + rng = np.random.default_rng(0) + n = 500 + x = rng.normal(size=(n, 2)) + y = 0.7 * x[:, 0] - 0.3 * x[:, 1] + 0.1 * rng.normal(size=n) + + beta = seed_beta_from_ols(y, x) + + assert beta.shape == (2,) + assert beta[0] == pytest.approx(0.7, rel=0.10) + assert beta[1] == pytest.approx(-0.3, rel=0.20) + + +def test_seed_beta_from_ols_handles_nan_pairwise(): + rng = np.random.default_rng(0) + n = 500 + x = rng.normal(size=(n, 2)) + y = 0.5 * x[:, 0] + 0.05 * rng.normal(size=n) + y[::5] = np.nan + x[::7, 0] = np.nan + + beta = seed_beta_from_ols(y, x) + + assert beta.shape == (2,) + assert np.all(np.isfinite(beta)) + + +def test_seed_beta_from_ols_returns_zeros_on_rank_deficient(): + n = 50 + x = np.zeros((n, 3)) + y = np.random.default_rng(0).normal(size=n) + + beta = seed_beta_from_ols(y, x) + + assert beta.shape == (3,) + assert np.allclose(beta, 0.0) + + +def test_spearman_result_dataclass_is_frozen(): + result = SpearmanResult( + loadings=np.zeros(2), + meas_sds=np.zeros(2), + latent_var=0.0, + valid=False, + ) + + with pytest.raises(AttributeError): + result.valid = True # type: ignore[misc] diff --git a/tests/test_amn_plot_harmonization.py b/tests/test_amn_plot_harmonization.py new file mode 100644 index 00000000..9c73704b --- /dev/null +++ b/tests/test_amn_plot_harmonization.py @@ -0,0 +1,108 @@ +"""Parametrised tests confirming plot helpers work for CHS, AF, and AMN.""" + +import numpy as np +import pandas as pd +import pytest + +from skillmodels import ( + AMNEstimationOptions, + decompose_measurement_variance, + estimate_amn, +) +from skillmodels.chs.filtered_states import get_filtered_states +from skillmodels.common.model_spec import ( + EstimationOptions, + FactorSpec, + ModelSpec, + Normalizations, +) + + +def _tiny_model() -> ModelSpec: + return ModelSpec( + factors={ + "skills": FactorSpec( + measurements=(("y1", "y2", "y3"), ("y1", "y2", "y3")), + normalizations=Normalizations( + loadings=({"y1": 1}, {"y1": 1}), + intercepts=({"y1": 0}, {}), + ), + transition_function="linear", + ), + }, + estimation_options=EstimationOptions( + robust_bounds=True, bounds_distance=0.001, n_mixtures=1 + ), + ) + + +def _tiny_data(n: int = 500, seed: int = 0) -> pd.DataFrame: + rng = np.random.default_rng(seed) + rows = [] + for caseid in range(n): + f0 = rng.normal() + f1 = 0.6 * f0 + rng.normal(0, 0.5) + for period, f in [(0, f0), (1, f1)]: + rows.append( + { + "caseid": caseid, + "period": period, + "y1": f + rng.normal(0, 0.3), + "y2": 0.9 * f + rng.normal(0, 0.4), + "y3": 1.1 * f + rng.normal(0, 0.5), + } + ) + return pd.DataFrame(rows).set_index(["caseid", "period"]) + + +@pytest.fixture(scope="module") +def amn_fit(): + model = _tiny_model() + data = _tiny_data(n=400) + options = AMNEstimationOptions( + n_mixture_components=2, n_simulation_draws=1000, seed=0 + ) + fit = estimate_amn(model, data, options) + return fit, data + + +def test_get_filtered_states_dispatches_to_amn(amn_fit): + fit, data = amn_fit + + out = 
get_filtered_states( + model_spec=fit.model_spec, + data=data, + params=fit.all_params, + amn_result=fit, + ) + + assert "unanchored_states" in out + states = out["unanchored_states"]["states"] + assert "skills" in states.columns + assert {"id", "period", "skills"} <= set(states.columns) + + +def test_get_filtered_states_rejects_both_af_and_amn_results(amn_fit): + fit, data = amn_fit + with pytest.raises(ValueError, match="only one of"): + get_filtered_states( + model_spec=fit.model_spec, + data=data, + params=fit.all_params, + af_result=fit, + amn_result=fit, + ) + + +def test_decompose_measurement_variance_works_with_amn_result(amn_fit): + fit, data = amn_fit + + decomp = decompose_measurement_variance( + fit.model_spec, + fit.all_params, + data, + amn_result=fit, + ) + + assert {"loading", "factor_variance", "meas_sd"} <= set(decomp.columns) + assert decomp.shape[0] > 0 diff --git a/tests/test_amn_simulate_and_regress.py b/tests/test_amn_simulate_and_regress.py new file mode 100644 index 00000000..a81e7e25 --- /dev/null +++ b/tests/test_amn_simulate_and_regress.py @@ -0,0 +1,207 @@ +"""Tests for `skillmodels.amn.simulate_and_regress` (AMN Stage 3).""" + +import numpy as np +import pandas as pd + +from skillmodels.amn.simulate_and_regress import ( + _draw_factor_panel, + _fit_linear, + _fit_log_ces, + simulate_and_regress, +) +from skillmodels.amn.types import MinimumDistanceResult +from skillmodels.common.model_spec import ( + EstimationOptions, + FactorSpec, + ModelSpec, + Normalizations, +) +from skillmodels.common.process_model import process_model + + +def _linear_model() -> ModelSpec: + return ModelSpec( + factors={ + "skills": FactorSpec( + measurements=(("y1", "y2", "y3"), ("y1", "y2", "y3")), + normalizations=Normalizations( + loadings=({"y1": 1}, {"y1": 1}), + intercepts=({"y1": 0}, {}), + ), + transition_function="linear", + ), + }, + estimation_options=EstimationOptions( + robust_bounds=True, bounds_distance=0.001, n_mixtures=1 + ), + ) + + +def _make_structural( + means: np.ndarray, + covs: np.ndarray, + slots: tuple[tuple[int, str], ...], +) -> MinimumDistanceResult: + return MinimumDistanceResult( + loadings=pd.DataFrame(), + measurement_intercepts=pd.DataFrame(), + measurement_sds=pd.DataFrame(), + factor_mixture_means=means, + factor_mixture_covariances=covs, + factor_period_slots=slots, + objective_value=0.0, + success=True, + ) + + +def test_fit_linear_recovers_known_coefficients(): + rng = np.random.default_rng(0) + n = 1000 + x_design = rng.normal(size=(n, 2)) + y = 0.5 * x_design[:, 0] - 0.3 * x_design[:, 1] + 1.2 + rng.normal(0, 0.1, size=n) + + params, sd = _fit_linear(y, x_design, ["a", "b"]) + + assert params["a"] == _pytest_approx(0.5, 0.05) + assert params["b"] == _pytest_approx(-0.3, 0.05) + assert params["constant"] == _pytest_approx(1.2, 0.05) + assert sd == _pytest_approx(0.1, abs_tol=0.02) + + +def _pytest_approx(target: float, rel: float = 0.05, *, abs_tol: float | None = None): + import pytest # noqa: PLC0415 + + if abs_tol is not None: + return pytest.approx(target, abs=abs_tol) + return pytest.approx(target, rel=rel) + + +def test_fit_log_ces_recovers_known_rho_and_share(): + rng = np.random.default_rng(1) + n = 2000 + x_design = rng.normal(0, 0.5, size=(n, 2)) + rho_true = -0.5 + gammas_true = np.array([0.65, 0.35]) + exponents = x_design * rho_true + log_inside = np.log( + gammas_true[0] * np.exp(exponents[:, 0]) + + gammas_true[1] * np.exp(exponents[:, 1]) + ) + y = log_inside / rho_true + rng.normal(0, 0.05, size=n) + + params, sd = 
_fit_log_ces(y, x_design, ["a", "b"], with_constant=False) + + assert params["a"] == _pytest_approx(0.65, 0.15) + assert params["b"] == _pytest_approx(0.35, 0.15) + assert params["phi"] == _pytest_approx(rho_true, abs_tol=0.15) + assert sd == _pytest_approx(0.05, abs_tol=0.05) + + +def test_draw_factor_panel_yields_expected_shape_and_moments(): + slots = ((0, "skills"), (1, "skills")) + truth_means = np.array([[-0.5, -0.2], [0.5, 0.3]]) + truth_covs = np.array( + [ + [[1.0, 0.3], [0.3, 1.1]], + [[0.9, 0.1], [0.1, 1.0]], + ] + ) + structural = _make_structural(truth_means, truth_covs, slots) + + panel = _draw_factor_panel(structural, np.array([0.4, 0.6]), n_draws=20000, seed=0) + + assert panel.shape == (20000, 2) + # Sample-mean on slot 0: 0.4 * (-0.5) + 0.6 * 0.5 = 0.1 + # Sample-mean on slot 1: 0.4 * (-0.2) + 0.6 * 0.3 = 0.1 + np.testing.assert_allclose(panel.mean().to_numpy(), [0.1, 0.1], atol=0.05) + + +def test_simulate_and_regress_returns_linear_transition_for_simple_model(): + model = _linear_model() + processed = process_model(model) + + # Build a structural result where both periods have a single + # factor; truth coefficient for the period-0 -> period-1 transition + # is 0.7 with intercept 0.1. + slots = ((0, "skills"), (1, "skills")) + truth_means = np.array([[0.0, 0.0]]) + truth_covs = np.array([[[1.0, 0.7], [0.7, 1.0 * 0.7**2 + 0.51]]]) + structural = _make_structural(truth_means, truth_covs, slots) + + result = simulate_and_regress( + structural, + processed, + model, + mixture_weights=np.array([1.0]), + n_draws=5000, + seed=0, + ) + + params = result.production_params + slope = float( + params.loc[("transition", 0, "skills", "skills"), "value"] # ty: ignore[invalid-argument-type] + ) + assert slope == _pytest_approx(0.7, abs_tol=0.05) + + +def test_simulate_and_regress_handles_translog(): + """Generic NLS path recovers translog params via the function callable.""" + from skillmodels.common.model_spec import ( # noqa: PLC0415 + EstimationOptions, + FactorSpec, + ModelSpec, + Normalizations, + ) + from skillmodels.common.process_model import process_model # noqa: PLC0415 + + model = ModelSpec( + factors={ + "skills": FactorSpec( + measurements=(("y1", "y2", "y3"), ("y1", "y2", "y3")), + normalizations=Normalizations( + loadings=({"y1": 1}, {"y1": 1}), + intercepts=({"y1": 0}, {}), + ), + transition_function="translog", + ), + }, + estimation_options=EstimationOptions( + robust_bounds=True, bounds_distance=0.001, n_mixtures=1 + ), + ) + processed = process_model(model) + slots = ((0, "skills"), (1, "skills")) + # Cov(period0, period1) chosen so OLS slope ≈ 0.6. + truth_means = np.array([[0.0, 0.0]]) + truth_covs = np.array([[[1.0, 0.6], [0.6, 1.0 * 0.6**2 + 0.4]]]) + structural = MinimumDistanceResult( + loadings=pd.DataFrame(), + measurement_intercepts=pd.DataFrame(), + measurement_sds=pd.DataFrame(), + factor_mixture_means=truth_means, + factor_mixture_covariances=truth_covs, + factor_period_slots=slots, + objective_value=0.0, + success=True, + ) + + result = simulate_and_regress( + structural, + processed, + model, + mixture_weights=np.array([1.0]), + n_draws=5000, + seed=0, + ) + + params = result.production_params + # translog params: linear coefficient on `skills` plus `skills ** 2` + # plus `constant`. The linear coefficient should approach the + # cov / var slope (≈ 0.6); the square coefficient should be small. 
+ assert ("transition", 0, "skills", "skills") in params.index + assert ("transition", 0, "skills", "skills ** 2") in params.index + assert ("transition", 0, "skills", "constant") in params.index + slope = float( + params.loc[("transition", 0, "skills", "skills"), "value"] # ty: ignore[invalid-argument-type] + ) + assert slope == _pytest_approx(0.6, abs_tol=0.1) diff --git a/tests/test_amn_start_values.py b/tests/test_amn_start_values.py new file mode 100644 index 00000000..a8f5ba10 --- /dev/null +++ b/tests/test_amn_start_values.py @@ -0,0 +1,235 @@ +"""Tests for `skillmodels.amn.start_values.get_spearman_start_params`. + +These tests exercise the Spearman + Bartlett-OLS start-value pipeline +(the legacy default, now opt-in via `start_params_strategy="spearman"`). +The new default `"amn"` runs the full Attanasio-Meghir-Nix estimator +upfront and is tested in `test_amn_estimate.py` and via +`test_maximization_inputs.py`. +""" + +import functools + +import numpy as np +import optimagic as om +import pandas as pd +import pytest + +from skillmodels.amn.start_values import ( + get_spearman_start_params, + pool_equality_groups, +) +from skillmodels.chs.maximization_inputs import get_maximization_inputs +from skillmodels.common.config import TEST_DATA_DIR +from skillmodels.common.constraints import select_by_loc +from skillmodels.common.model_spec import ModelSpec +from skillmodels.common.types import EstimationOptions +from skillmodels.common.utilities import reduce_n_periods +from skillmodels.test_data.model2 import MODEL2 + + +@pytest.fixture +def model2_short() -> ModelSpec: + spec = reduce_n_periods(MODEL2, new_n_periods=3) + assert isinstance(spec, ModelSpec) + return spec + + +@pytest.fixture +def model2_data() -> pd.DataFrame: + return pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta").set_index( + ["caseid", "period"] + ) + + +def test_default_strategy_is_amn() -> None: + """`EstimationOptions().start_params_strategy` defaults to "amn".""" + assert EstimationOptions().start_params_strategy == "amn" + + +def test_template_filled_with_spearman_strategy( + model2_short: ModelSpec, model2_data: pd.DataFrame +) -> None: + """`start_params_strategy="spearman"` returns a fully-populated template.""" + spec = model2_short.with_estimation_options( + EstimationOptions(start_params_strategy="spearman") + ) + inputs = get_maximization_inputs(spec, model2_data) + template = inputs["params_template"] + assert not template["value"].isna().any() + + +def test_strategy_none_leaves_nan( + model2_short: ModelSpec, model2_data: pd.DataFrame +) -> None: + """`start_params_strategy="none"` reproduces the legacy NaN behaviour.""" + spec_none = model2_short.with_estimation_options( + EstimationOptions(start_params_strategy="none") + ) + inputs = get_maximization_inputs(spec_none, model2_data) + template = inputs["params_template"] + assert template["value"].isna().any() + + +def test_filled_template_yields_finite_loglike( + model2_short: ModelSpec, model2_data: pd.DataFrame +) -> None: + """The moment-seeded template produces a finite log-likelihood.""" + inputs = get_maximization_inputs(model2_short, model2_data) + val = inputs["loglike"](inputs["params_template"]) + assert np.isfinite(val) + + +def test_loadings_seeded_from_data_not_constant( + model2_short: ModelSpec, model2_data: pd.DataFrame +) -> None: + """Loadings vary across measurements (Spearman seed, not flat 1.0).""" + inputs = get_maximization_inputs(model2_short, model2_data) + template = inputs["params_template"] + loadings = 
template.loc["loadings", "value"] + free = ( + template.loc["loadings", "lower_bound"] + != template.loc["loadings", "upper_bound"] + ) + free_loadings = loadings[free].to_numpy() + assert (free_loadings != free_loadings[0]).any() + assert not np.allclose(free_loadings, 1.0) + + +def test_meas_sds_seeded_from_data_not_constant( + model2_short: ModelSpec, model2_data: pd.DataFrame +) -> None: + """Measurement SDs vary across indicators (residual SD seed, not 0.5).""" + inputs = get_maximization_inputs(model2_short, model2_data) + template = inputs["params_template"] + meas_sds = template.loc["meas_sds", "value"].to_numpy() + assert (meas_sds != meas_sds[0]).any() + assert (meas_sds > 0).all() + + +def test_initial_cholcovs_diagonal_is_positive( + model2_short: ModelSpec, model2_data: pd.DataFrame +) -> None: + """Initial-cov diagonals are positive (sqrt(latent_var)).""" + inputs = get_maximization_inputs(model2_short, model2_data) + template = inputs["params_template"] + cholcov = template.loc["initial_cholcovs", "value"] + diag_mask = pd.Series( + [name2.split("-")[0] == name2.split("-")[1] for *_, name2 in cholcov.index], + index=cholcov.index, + ) + assert (cholcov[diag_mask] > 0).all() + + +def test_fixed_params_pin_survives_moment_fill( + model2_short: ModelSpec, model2_data: pd.DataFrame +) -> None: + """Entries set by `fixed_params` keep their pinned value.""" + fixed_idx = pd.MultiIndex.from_tuples( + [("transition", 0, "fac1", "fac3"), ("transition", 1, "fac1", "fac3")], + names=["category", "period", "name1", "name2"], + ) + fixed_df = pd.DataFrame({"value": [0.0, 0.0]}, index=fixed_idx) + inputs = get_maximization_inputs(model2_short, model2_data, fixed_params=fixed_df) + template = inputs["params_template"] + assert template.loc[("transition", 0, "fac1", "fac3"), "value"] == 0.0 + assert template.loc[("transition", 1, "fac1", "fac3"), "value"] == 0.0 + + +def test_explicit_strategy_argument_via_helper( + model2_short: ModelSpec, model2_data: pd.DataFrame +) -> None: + """The standalone helper produces the same fills as the wired-in spearman path.""" + spec_none = model2_short.with_estimation_options( + EstimationOptions(start_params_strategy="none") + ) + inputs_raw = get_maximization_inputs(spec_none, model2_data) + template_raw = inputs_raw["params_template"] + filled = get_spearman_start_params(spec_none, model2_data, template_raw) + + spec_spearman = model2_short.with_estimation_options( + EstimationOptions(start_params_strategy="spearman") + ) + inputs_spearman = get_maximization_inputs(spec_spearman, model2_data) + template_spearman = inputs_spearman["params_template"] + + pd.testing.assert_series_equal(filled["value"], template_spearman["value"]) + + +def test_helper_does_not_overwrite_user_set_values( + model2_short: ModelSpec, model2_data: pd.DataFrame +) -> None: + """If the caller already set a non-NaN value, the helper preserves it.""" + spec_none = model2_short.with_estimation_options( + EstimationOptions(start_params_strategy="none") + ) + inputs = get_maximization_inputs(spec_none, model2_data) + template = inputs["params_template"] + sentinel_loc = template.index[template["value"].isna()][0] + template.loc[sentinel_loc, "value"] = 999.0 + filled = get_spearman_start_params(spec_none, model2_data, template) + assert filled.loc[sentinel_loc, "value"] == 999.0 + + +def test_transition_coefficients_seeded_via_ols( + model2_short: ModelSpec, model2_data: pd.DataFrame +) -> None: + """Free transition rows get AMN-style OLS seeds, not constant 0.5.""" + inputs = 
get_maximization_inputs(model2_short, model2_data) + template = inputs["params_template"] + free_trans = template.loc["transition"] + free_mask = free_trans["lower_bound"] != free_trans["upper_bound"] + free_values = free_trans.loc[free_mask, "value"] + assert (free_values != 0.5).any() + + +def test_shock_sds_seeded_via_residual_variance( + model2_short: ModelSpec, model2_data: pd.DataFrame +) -> None: + """Free shock_sds rows get residual-variance seeds, not flat 0.5.""" + inputs = get_maximization_inputs(model2_short, model2_data) + template = inputs["params_template"] + free_sds = template.loc["shock_sds"] + free_mask = free_sds["lower_bound"] != free_sds["upper_bound"] + free_values = free_sds.loc[free_mask, "value"] + assert (free_values != 0.5).any() + + +def test_pool_equality_groups_averages_unpinned() -> None: + """Members of an `om.EqualityConstraint` group are averaged.""" + idx = pd.MultiIndex.from_tuples( + [ + ("meas_sds", 0, "z1", "-"), + ("meas_sds", 1, "z1", "-"), + ("meas_sds", 2, "z1", "-"), + ], + names=["category", "period", "name1", "name2"], + ) + params = pd.DataFrame({"value": [0.2, 0.4, 0.6]}, index=idx) + constraints: list[om.constraints.Constraint] = [ + om.EqualityConstraint( + selector=functools.partial(select_by_loc, loc=idx), + ), + ] + out = pool_equality_groups(params, constraints) + assert list(out["value"]) == pytest.approx([0.4, 0.4, 0.4]) + + +def test_pool_equality_groups_respects_pinned() -> None: + """If any group member is pinned, that value propagates to the rest.""" + idx = pd.MultiIndex.from_tuples( + [ + ("meas_sds", 0, "z1", "-"), + ("meas_sds", 1, "z1", "-"), + ("meas_sds", 2, "z1", "-"), + ], + names=["category", "period", "name1", "name2"], + ) + params = pd.DataFrame({"value": [0.2, 0.4, 0.6]}, index=idx) + pinned = pd.Series([False, True, False], index=idx) + constraints: list[om.constraints.Constraint] = [ + om.EqualityConstraint( + selector=functools.partial(select_by_loc, loc=idx), + ), + ] + out = pool_equality_groups(params, constraints, keep_pinned_values=pinned) + assert list(out["value"]) == pytest.approx([0.4, 0.4, 0.4]) diff --git a/tests/test_check_model.py b/tests/test_check_model.py index 7f1a36bf..8fbdb411 100644 --- a/tests/test_check_model.py +++ b/tests/test_check_model.py @@ -2,14 +2,14 @@ from types import SimpleNamespace -from skillmodels.check_model import ( +from skillmodels.common.check_model import ( _check_anchoring, _check_loadings_are_not_normalized_to_zero, _check_measurements, _check_normalized_variables_are_present, check_stagemap, ) -from skillmodels.model_spec import FactorSpec, ModelSpec, Normalizations +from skillmodels.common.model_spec import FactorSpec, ModelSpec, Normalizations def test_invalid_stagemap_length() -> None: diff --git a/tests/test_clipping.py b/tests/test_clipping.py index 1afd17e3..ae50d788 100644 --- a/tests/test_clipping.py +++ b/tests/test_clipping.py @@ -3,7 +3,7 @@ import jax.numpy as jnp import numpy as np -from skillmodels.clipping import soft_clipping +from skillmodels.chs.clipping import soft_clipping def test_one_sided_soft_maximum() -> None: diff --git a/tests/test_constraints.py b/tests/test_constraints.py index fede7622..6fac1866 100644 --- a/tests/test_constraints.py +++ b/tests/test_constraints.py @@ -9,7 +9,7 @@ import pytest from pandas.testing import assert_frame_equal -from skillmodels.constraints import ( +from skillmodels.common.constraints import ( FixedConstraintWithValue, _get_anchoring_constraints, _get_constant_factors_constraints, @@ -22,9 +22,9 @@ 
add_bounds, get_constraints, ) -from skillmodels.process_model import process_model +from skillmodels.common.process_model import process_model +from skillmodels.common.types import Anchoring, Labels, Normalizations from skillmodels.test_data.simplest_augmented_model import SIMPLEST_AUGMENTED_MODEL -from skillmodels.types import Anchoring, Labels, Normalizations def _to_dict(c: om.constraints.Constraint) -> dict[str, Any]: @@ -430,24 +430,24 @@ def test_get_constraints_for_augmented_periods(simplest_augmented_model) -> None endogenous_factors_info=simplest_augmented_model.endogenous_factors_info, ) as_dicts = [_to_dict(c) for c in calculated] + # Only the non-final aug-period of each meas-type should produce + # identity constraints: `get_transition_index_tuples` truncates + # transitions at `aug_periods[:-2]` when endogenous factors are + # present, so emitting fixed constraints at the last STATES- or + # ENDO-typed aug-period would target locs that don't exist in the + # params index. Aug 2 (last STATES-typed) and aug 3 (last + # ENDO-typed) are therefore intentionally absent from the expected + # list. expected = [ {"loc": ("transition", 0, "fac1", "fac1"), "type": "fixed", "value": 1.0}, {"loc": ("transition", 0, "fac1", "fac2"), "type": "fixed", "value": 0.0}, {"loc": ("transition", 0, "fac1", "of"), "type": "fixed", "value": 0.0}, {"loc": ("transition", 0, "fac1", "constant"), "type": "fixed", "value": 0.0}, {"loc": ("shock_sds", 0, "fac1", "-"), "type": "fixed", "value": 0.00000001}, - {"loc": ("transition", 2, "fac1", "fac1"), "type": "fixed", "value": 1.0}, - {"loc": ("transition", 2, "fac1", "fac2"), "type": "fixed", "value": 0.0}, - {"loc": ("transition", 2, "fac1", "of"), "type": "fixed", "value": 0.0}, - {"loc": ("transition", 2, "fac1", "constant"), "type": "fixed", "value": 0.0}, {"loc": ("transition", 1, "fac2", "fac1"), "type": "fixed", "value": 0.0}, {"loc": ("transition", 1, "fac2", "fac2"), "type": "fixed", "value": 1.0}, {"loc": ("transition", 1, "fac2", "of"), "type": "fixed", "value": 0.0}, {"loc": ("transition", 1, "fac2", "constant"), "type": "fixed", "value": 0.0}, {"loc": ("shock_sds", 1, "fac2", "-"), "type": "fixed", "value": 0.00000001}, - {"loc": ("transition", 3, "fac2", "fac1"), "type": "fixed", "value": 0.0}, - {"loc": ("transition", 3, "fac2", "fac2"), "type": "fixed", "value": 1.0}, - {"loc": ("transition", 3, "fac2", "of"), "type": "fixed", "value": 0.0}, - {"loc": ("transition", 3, "fac2", "constant"), "type": "fixed", "value": 0.0}, ] assert_list_equal_except_for_order(as_dicts, expected) diff --git a/tests/test_correlation_heatmap.py b/tests/test_correlation_heatmap.py index 114dc9bc..5b95e852 100644 --- a/tests/test_correlation_heatmap.py +++ b/tests/test_correlation_heatmap.py @@ -9,7 +9,7 @@ import pytest from pandas.testing import assert_frame_equal as afe -from skillmodels.correlation_heatmap import ( +from skillmodels.common.correlation_heatmap import ( _get_mask, _get_measurement_data_for_multiple_periods, _get_measurement_data_for_single_period, @@ -21,7 +21,7 @@ get_scores_corr, plot_correlation_heatmap, ) -from skillmodels.types import Labels +from skillmodels.common.types import Labels REGRESSION_VAULT = Path(__file__).parent / "regression_vault" diff --git a/tests/test_decorators.py b/tests/test_decorators.py index 44dd7645..165d2618 100644 --- a/tests/test_decorators.py +++ b/tests/test_decorators.py @@ -3,7 +3,11 @@ import jax.numpy as jnp import pytest -from skillmodels.decorators import extract_params, jax_array_output, 
register_params +from skillmodels.common.decorators import ( + extract_params, + jax_array_output, + register_params, +) def test_extract_params_decorator_only_key() -> None: diff --git a/tests/test_diagnostic_plots.py b/tests/test_diagnostic_plots.py index c823aa39..559d4e8f 100644 --- a/tests/test_diagnostic_plots.py +++ b/tests/test_diagnostic_plots.py @@ -6,11 +6,11 @@ import plotly.graph_objects as go import pytest -from skillmodels.diagnostic_plots import ( +from skillmodels.chs.maximization_inputs import get_maximization_inputs +from skillmodels.common.diagnostic_plots import ( plot_likelihood_contributions, plot_residual_boxplots, ) -from skillmodels.maximization_inputs import get_maximization_inputs REGRESSION_VAULT = Path(__file__).parent / "regression_vault" diff --git a/tests/test_filtered_states.py b/tests/test_filtered_states.py index 787e9764..dea9b209 100644 --- a/tests/test_filtered_states.py +++ b/tests/test_filtered_states.py @@ -6,9 +6,9 @@ import pandas as pd import pytest -from skillmodels.config import TEST_DATA_DIR -from skillmodels.filtered_states import get_filtered_states -from skillmodels.maximization_inputs import get_maximization_inputs +from skillmodels.chs.filtered_states import get_filtered_states +from skillmodels.chs.maximization_inputs import get_maximization_inputs +from skillmodels.common.config import TEST_DATA_DIR from skillmodels.test_data.model2 import MODEL2 REGRESSION_VAULT = Path(__file__).parent / "regression_vault" diff --git a/tests/test_kalman_filters.py b/tests/test_kalman_filters.py index 9099696d..73b04040 100644 --- a/tests/test_kalman_filters.py +++ b/tests/test_kalman_filters.py @@ -10,7 +10,7 @@ from filterpy.kalman import JulierSigmaPoints, KalmanFilter from numpy.testing import assert_array_almost_equal as aaae -from skillmodels.kalman_filters import ( +from skillmodels.chs.kalman_filters import ( _calculate_sigma_points, calculate_sigma_scaling_factor_and_weights, kalman_predict, @@ -18,7 +18,7 @@ linear_kalman_predict, transform_sigma_points, ) -from skillmodels.kalman_filters_debug import kalman_update as kalman_update_debug +from skillmodels.chs.kalman_filters_debug import kalman_update as kalman_update_debug jax.config.update("jax_enable_x64", True) diff --git a/tests/test_likelihood_regression.py b/tests/test_likelihood_regression.py index cfe7a621..a7b952b9 100644 --- a/tests/test_likelihood_regression.py +++ b/tests/test_likelihood_regression.py @@ -11,12 +11,12 @@ import pytest from numpy.testing import assert_array_almost_equal as aaae -from skillmodels.config import TEST_DATA_DIR -from skillmodels.decorators import register_params -from skillmodels.maximization_inputs import get_maximization_inputs -from skillmodels.model_spec import ModelSpec, Normalizations +from skillmodels.chs.maximization_inputs import get_maximization_inputs +from skillmodels.common.config import TEST_DATA_DIR +from skillmodels.common.decorators import register_params +from skillmodels.common.model_spec import ModelSpec, Normalizations +from skillmodels.common.utilities import reduce_n_periods from skillmodels.test_data.model2 import MODEL2 -from skillmodels.utilities import reduce_n_periods jax.config.update("jax_enable_x64", True) diff --git a/tests/test_maximization_inputs.py b/tests/test_maximization_inputs.py index eb807d48..3eacb618 100644 --- a/tests/test_maximization_inputs.py +++ b/tests/test_maximization_inputs.py @@ -2,10 +2,19 @@ import jax.numpy as jnp import numpy as np +import optimagic as om import pandas as pd import pytest -from 
skillmodels.maximization_inputs import _get_jnp_params_vec, _to_numpy +from skillmodels.chs.maximization_inputs import ( + _get_jnp_params_vec, + _to_numpy, + get_maximization_inputs, +) +from skillmodels.common.config import TEST_DATA_DIR +from skillmodels.common.constraints import FixedConstraintWithValue +from skillmodels.common.utilities import reduce_n_periods +from skillmodels.test_data.model2 import MODEL2 def test_to_numpy_with_dict() -> None: @@ -56,3 +65,97 @@ def test_get_jnp_params_vec_additional_entries_raises() -> None: ) with pytest.raises(ValueError, match="additional entries"): _get_jnp_params_vec(params, target_index) + + +@pytest.fixture +def model2_short(): + return reduce_n_periods(MODEL2, new_n_periods=3) + + +@pytest.fixture +def model2_data(): + return pd.read_stata(TEST_DATA_DIR / "model2_simulated_data.dta").set_index( + ["caseid", "period"] + ) + + +def test_get_maximization_inputs_with_fixed_params_pins_cross_factor_gamma( + model2_short, model2_data +) -> None: + """Fix gamma_fac3 in log_ces at 0 via fixed_params; verify CHS pipeline. + + Before probability + fixed-param support in optimagic, combining a + `ProbabilityConstraint` with a `FixedConstraint` on one of its selected + entries raised `InvalidConstraintError`. Now the fold machinery removes + the fixed entry from the selector; CHS should build a valid problem + whose params_template and constraint list reflect the pin and whose + log-likelihood evaluates to a finite number. + """ + fixed_idx = pd.MultiIndex.from_tuples( + [ + ("transition", 0, "fac1", "fac3"), + ("transition", 1, "fac1", "fac3"), + ], + names=["category", "period", "name1", "name2"], + ) + fixed_df = pd.DataFrame({"value": [0.0, 0.0]}, index=fixed_idx) + + inputs = get_maximization_inputs(model2_short, model2_data, fixed_params=fixed_df) + + template = inputs["params_template"] + assert template.loc[("transition", 0, "fac1", "fac3"), "value"] == 0.0 + assert template.loc[("transition", 1, "fac1", "fac3"), "value"] == 0.0 + user_fixed = [ + c + for c in inputs["constraints"] + if isinstance(c, FixedConstraintWithValue) and c.loc in set(fixed_idx) + ] + assert len(user_fixed) == 2 + + # optimagic should accept the combined problem with our fold helper. + params = template.copy() + # Fill free entries with reasonable starting values compatible with the + # simplex constraint: split the remaining 1.0 between fac1 and fac2. 
+    for t in (0, 1):
+        params.loc[("transition", t, "fac1", "fac1"), "value"] = 0.5
+        params.loc[("transition", t, "fac1", "fac2"), "value"] = 0.5
+    params["value"] = params["value"].fillna(0.1)
+
+    om.check_constraints(
+        params=params[["value"]],
+        constraints=inputs["constraints"],
+    )
+
+    loglike_val = inputs["loglike"](params)
+    assert np.isfinite(loglike_val)
+
+
+def test_get_maximization_inputs_with_fixed_params_non_zero(
+    model2_short, model2_data
+) -> None:
+    """Fix a gamma at a non-zero value; remaining simplex sums to 1 - c."""
+    fixed_idx = pd.MultiIndex.from_tuples(
+        [("transition", 0, "fac1", "fac3")],
+        names=["category", "period", "name1", "name2"],
+    )
+    fixed_df = pd.DataFrame({"value": [0.2]}, index=fixed_idx)
+
+    inputs = get_maximization_inputs(model2_short, model2_data, fixed_params=fixed_df)
+
+    template = inputs["params_template"]
+    assert template.loc[("transition", 0, "fac1", "fac3"), "value"] == 0.2
+    params = template.copy()
+    params.loc[("transition", 0, "fac1", "fac1"), "value"] = 0.4
+    params.loc[("transition", 0, "fac1", "fac2"), "value"] = 0.4
+    params.loc[("transition", 1, "fac1", "fac1"), "value"] = 0.4
+    params.loc[("transition", 1, "fac1", "fac2"), "value"] = 0.4
+    params.loc[("transition", 1, "fac1", "fac3"), "value"] = 0.2
+    params["value"] = params["value"].fillna(0.1)
+
+    om.check_constraints(
+        params=params[["value"]],
+        constraints=inputs["constraints"],
+    )
+
+    loglike_val = inputs["loglike"](params)
+    assert np.isfinite(loglike_val)
diff --git a/tests/test_model_spec.py b/tests/test_model_spec.py
index dda3958d..ea21827a 100644
--- a/tests/test_model_spec.py
+++ b/tests/test_model_spec.py
@@ -2,8 +2,13 @@
 
 import pytest
 
-from skillmodels.model_spec import AnchoringSpec, FactorSpec, ModelSpec, Normalizations
-from skillmodels.types import EstimationOptions
+from skillmodels.common.model_spec import (
+    AnchoringSpec,
+    FactorSpec,
+    ModelSpec,
+    Normalizations,
+)
+from skillmodels.common.types import EstimationOptions
 
 
 def _minimal_dict():
diff --git a/tests/test_params_index.py b/tests/test_params_index.py
index 9a3fc6c0..55210ed6 100644
--- a/tests/test_params_index.py
+++ b/tests/test_params_index.py
@@ -5,8 +5,8 @@
 import pandas as pd
 import pytest
 
-from skillmodels.config import TEST_DATA_DIR
-from skillmodels.params_index import (
+from skillmodels.common.config import TEST_DATA_DIR
+from skillmodels.common.params_index import (
     get_control_params_index_tuples,
     get_initial_cholcovs_index_tuples,
     get_loadings_index_tuples,
@@ -17,9 +17,9 @@
     get_transition_index_tuples,
     initial_mean_index_tuples,
 )
-from skillmodels.process_model import process_model
+from skillmodels.common.process_model import process_model
+from skillmodels.common.types import TransitionInfo
 from skillmodels.test_data.model2 import MODEL2
-from skillmodels.types import TransitionInfo
 
 
 @pytest.fixture
diff --git a/tests/test_parse_params.py b/tests/test_parse_params.py
index 07272511..b273233c 100644
--- a/tests/test_parse_params.py
+++ b/tests/test_parse_params.py
@@ -14,11 +14,11 @@
 import pytest
 from numpy.testing import assert_array_equal as aae
 
-from skillmodels.config import TEST_DATA_DIR
-from skillmodels.parse_params import create_parsing_info, parse_params
-from skillmodels.process_model import process_model
+from skillmodels.common.config import TEST_DATA_DIR
+from skillmodels.common.parse_params import create_parsing_info, parse_params
+from skillmodels.common.process_model import process_model
+from skillmodels.common.types import Anchoring
 from skillmodels.test_data.model2 import MODEL2
-from skillmodels.types import Anchoring
 
 
 @pytest.fixture
diff --git a/tests/test_process_data.py b/tests/test_process_data.py
index ec2fce30..f5c5b93a 100644
--- a/tests/test_process_data.py
+++ b/tests/test_process_data.py
@@ -10,8 +10,8 @@
 import pytest
 from numpy.testing import assert_array_equal as aae
 
-from skillmodels.config import TEST_DATA_DIR
-from skillmodels.process_data import (
+from skillmodels.common.config import TEST_DATA_DIR
+from skillmodels.common.process_data import (
     _augment_data_for_endogenous_factors,
     _generate_controls_array,
     _generate_measurements_array,
@@ -19,9 +19,9 @@
     _handle_controls_with_missings,
     pre_process_data,
 )
-from skillmodels.process_model import process_model
+from skillmodels.common.process_model import process_model
+from skillmodels.common.types import Labels
 from skillmodels.test_data.simplest_augmented_model import SIMPLEST_AUGMENTED_MODEL
-from skillmodels.types import Labels
 
 
 def test_pre_process_data() -> None:
diff --git a/tests/test_process_debug_data.py b/tests/test_process_debug_data.py
index 4b69e118..593a1668 100644
--- a/tests/test_process_debug_data.py
+++ b/tests/test_process_debug_data.py
@@ -4,7 +4,7 @@
 import pandas as pd
 import pytest
 
-from skillmodels.process_debug_data import (
+from skillmodels.chs.process_debug_data import (
     _create_post_update_states,
     _process_residuals,
     create_state_ranges,
diff --git a/tests/test_process_model.py b/tests/test_process_model.py
index 3b9a03e3..4d7482e9 100644
--- a/tests/test_process_model.py
+++ b/tests/test_process_model.py
@@ -8,11 +8,11 @@
 import pytest
 from pandas.testing import assert_frame_equal
 
-from skillmodels.config import TEST_DATA_DIR
-from skillmodels.model_spec import FactorSpec
-from skillmodels.process_model import get_has_endogenous_factors, process_model
+from skillmodels.common.config import TEST_DATA_DIR
+from skillmodels.common.model_spec import FactorSpec
+from skillmodels.common.process_model import get_has_endogenous_factors, process_model
+from skillmodels.common.types import Normalizations, TransitionInfo
 from skillmodels.test_data.model2 import MODEL2
-from skillmodels.types import Normalizations, TransitionInfo
 
 
 @pytest.fixture
diff --git a/tests/test_qr.py b/tests/test_qr.py
index b6f74532..387702c3 100644
--- a/tests/test_qr.py
+++ b/tests/test_qr.py
@@ -7,7 +7,7 @@
 from numpy.testing import assert_array_almost_equal as aaae
 from numpy.typing import NDArray
 
-from skillmodels.qr import qr_gpu
+from skillmodels.chs.qr import qr_gpu
 
 
 SEED = 20
diff --git a/tests/test_simulate_data.py b/tests/test_simulate_data.py
index 782367e7..31ba9879 100644
--- a/tests/test_simulate_data.py
+++ b/tests/test_simulate_data.py
@@ -7,15 +7,15 @@
 import pytest
 from numpy.testing import assert_array_almost_equal as aaae
 
-from skillmodels.model_spec import (
+from skillmodels.common.model_spec import (
     EstimationOptions,
     FactorSpec,
     ModelSpec,
     Normalizations,
 )
-from skillmodels.params_index import get_params_index
-from skillmodels.process_model import process_model
-from skillmodels.simulate_data import (
+from skillmodels.common.params_index import get_params_index
+from skillmodels.common.process_model import process_model
+from skillmodels.common.simulate_data import (
     _collapse_aug_periods_to_periods,
     _get_shock,
     measurements_from_states,
diff --git a/tests/test_transition_functions.py b/tests/test_transition_functions.py
index 363f3bdf..c5ebf398 100644
--- a/tests/test_transition_functions.py
+++ b/tests/test_transition_functions.py
@@ -6,7 +6,7 @@
 import pytest
 from numpy.testing import assert_array_almost_equal as aaae
 
-from skillmodels.transition_functions import (
+from skillmodels.common.transition_functions import (
     constant,
     constraints_log_ces,
     identity_constraints_linear,
diff --git a/tests/test_types.py b/tests/test_types.py
index 4f03b868..1716cdc9 100644
--- a/tests/test_types.py
+++ b/tests/test_types.py
@@ -5,7 +5,7 @@
 
 import pytest
 
-from skillmodels.types import FactorInfo, _make_immutable
+from skillmodels.common.types import FactorInfo, _make_immutable
 
 
 def test_make_immutable_list_to_tuple() -> None:
diff --git a/tests/test_utilities.py b/tests/test_utilities.py
index 66e05632..70794d57 100644
--- a/tests/test_utilities.py
+++ b/tests/test_utilities.py
@@ -10,10 +10,9 @@
 import pytest
 from pandas.testing import assert_frame_equal, assert_index_equal
 
-from skillmodels.model_spec import ModelSpec
-from skillmodels.process_model import process_model
-from skillmodels.test_data.model2 import MODEL2
-from skillmodels.utilities import (
+from skillmodels.common.model_spec import ModelSpec
+from skillmodels.common.process_model import process_model
+from skillmodels.common.utilities import (
     _extend_params,
     _get_params_index,
     extract_factors,
@@ -25,6 +24,7 @@
     switch_translog_to_linear,
     update_parameter_values,
 )
+from skillmodels.test_data.model2 import MODEL2
 
 
 @pytest.fixture
diff --git a/tests/test_utils_plotting.py b/tests/test_utils_plotting.py
index 728677a2..77e971bd 100644
--- a/tests/test_utils_plotting.py
+++ b/tests/test_utils_plotting.py
@@ -2,7 +2,7 @@
 
 import numpy as np
 
-from skillmodels.utils_plotting import get_layout_kwargs, get_make_subplot_kwargs
+from skillmodels.common.utils_plotting import get_layout_kwargs, get_make_subplot_kwargs
 
 
 def test_get_layout_kwargs_defaults() -> None:
diff --git a/tests/test_variance_decomposition.py b/tests/test_variance_decomposition.py
index acfa924e..194b2be6 100644
--- a/tests/test_variance_decomposition.py
+++ b/tests/test_variance_decomposition.py
@@ -4,7 +4,7 @@
 import pytest
 from numpy.testing import assert_array_almost_equal as aaae
 
-from skillmodels.variance_decomposition import (
+from skillmodels.common.variance_decomposition import (
     _compute_variance_decomposition,
     summarize_measurement_reliability,
 )
diff --git a/tests/test_visualize_factor_distributions.py b/tests/test_visualize_factor_distributions.py
index 3baa03bb..8957da23 100644
--- a/tests/test_visualize_factor_distributions.py
+++ b/tests/test_visualize_factor_distributions.py
@@ -4,17 +4,17 @@
 
 import pandas as pd
 
-from skillmodels.config import TEST_DATA_DIR
-from skillmodels.filtered_states import get_filtered_states
-from skillmodels.maximization_inputs import get_maximization_inputs
-from skillmodels.simulate_data import simulate_dataset
-from skillmodels.test_data.model2 import MODEL2
-from skillmodels.visualize_factor_distributions import (
+from skillmodels.chs.filtered_states import get_filtered_states
+from skillmodels.chs.maximization_inputs import get_maximization_inputs
+from skillmodels.common.config import TEST_DATA_DIR
+from skillmodels.common.simulate_data import simulate_dataset
+from skillmodels.common.visualize_factor_distributions import (
     bivariate_density_contours,
     bivariate_density_surfaces,
     combine_distribution_plots,
     univariate_densities,
 )
+from skillmodels.test_data.model2 import MODEL2
 
 
 REGRESSION_VAULT = Path(__file__).parent / "regression_vault"
diff --git a/tests/test_visualize_transition_equations.py b/tests/test_visualize_transition_equations.py
index de630f7a..3494f41d 100644
--- a/tests/test_visualize_transition_equations.py
+++ b/tests/test_visualize_transition_equations.py
@@ -4,13 +4,13 @@
 
 import pandas as pd
 
-from skillmodels.config import TEST_DATA_DIR
-from skillmodels.maximization_inputs import get_maximization_inputs
-from skillmodels.test_data.model2 import MODEL2
-from skillmodels.visualize_transition_equations import (
+from skillmodels.chs.maximization_inputs import get_maximization_inputs
+from skillmodels.common.config import TEST_DATA_DIR
+from skillmodels.common.visualize_transition_equations import (
     combine_transition_plots,
     get_transition_plots,
 )
+from skillmodels.test_data.model2 import MODEL2
 
 
 REGRESSION_VAULT = Path(__file__).parent / "regression_vault"