Skip to content

Commit

Permalink
merged with main
Browse files Browse the repository at this point in the history
  • Loading branch information
Jammy2211 committed Oct 3, 2024
2 parents 1a4d959 + 121fd96 commit 8a6f2e1
Show file tree
Hide file tree
Showing 113 changed files with 2,410 additions and 776 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: [3.8, 3.9, '3.10', '3.11']
python-version: [3.9, '3.10', '3.11', '3.12']
steps:
- name: Checkout PyAutoConf
uses: actions/checkout@v2
Expand Down
14 changes: 7 additions & 7 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -125,13 +125,13 @@ venv.bak/
.idea
workspace/output/
output
test/optimize/test_fit
test/mle/test_fit
test/test_files/text/
test/
test_autofit/optimize/test_fit/
test_autofit/mle/test_fit/
test_autofit/test_files/text/psycopg2-binary==2.8.1
test_autofit/test_files/text/
fit/test_autofit/optimize/test_fit
fit/test_autofit/mle/test_fit
*.DS_Store

test_autofit/config/priors/old
Expand All @@ -157,7 +157,7 @@ test_autofit/samples.csv
__MACOSX
*.swp
test/autofit/test_fit
# Byte-compiled / optimized / DLL files
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
Expand Down Expand Up @@ -264,13 +264,13 @@ venv.bak/
.idea
workspace/output/
output
test/optimize/test_fit
test/mle/test_fit
test/test_files/text/
test/
test_autofit/optimize/test_fit/
test_autofit/mle/test_fit/
test_autofit/test_files/text/psycopg2-binary==2.8.1
test_autofit/test_files/text/
fit/test_autofit/optimize/test_fit
fit/test_autofit/mle/test_fit
*.DS_Store

test_autofit/config/priors/old
Expand Down
4 changes: 2 additions & 2 deletions README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ The following links are useful for new starters:

- `The introduction Jupyter Notebook on Binder <https://mybinder.org/v2/gh/Jammy2211/autofit_workspace/release?filepath=notebooks/overview/overview_1_the_basics.ipynb>`_, where you can try **PyAutoFit** in a web browser (without installation).

- `The autofit_workspace GitHub repository <https://github.com/Jammy2211/autofit_workspace>`_, which includes example scripts and the `HowToFit Jupyter notebook lectures <https://github.com/Jammy2211/autofit_workspace/tree/master/notebooks/howtofit>`_ which give new users a step-by-step introduction to **PyAutoFit**.
- `The autofit_workspace GitHub repository <https://github.com/Jammy2211/autofit_workspace>`_, which includes example scripts and the `HowToFit Jupyter notebook lectures <https://github.com/Jammy2211/autofit_workspace/tree/main/notebooks/howtofit>`_ which give new users a step-by-step introduction to **PyAutoFit**.

Support
-------
Expand All @@ -72,7 +72,7 @@ API Overview
To illustrate the **PyAutoFit** API, we use an illustrative toy model of fitting a one-dimensional Gaussian to
noisy 1D data. Here's the ``data`` (black) and the model (red) we'll fit:

.. image:: https://raw.github.com/rhayes777/PyAutoFit/master/files/toy_model_fit.png
.. image:: https://raw.github.com/rhayes777/PyAutoFit/main/files/toy_model_fit.png
:width: 400

We define our model, a 1D Gaussian by writing a Python class using the format below:
Expand Down
16 changes: 9 additions & 7 deletions autofit/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@
from .mapper.prior_model.annotation import AnnotationPriorModel
from .mapper.prior_model.collection import Collection
from .mapper.prior_model.prior_model import Model
from .mapper.prior_model.prior_model import Model
from .mapper.prior_model.array import Array
from .non_linear.search.abstract_search import NonLinearSearch
from .non_linear.analysis.visualize import Visualizer
from .non_linear.analysis.analysis import Analysis
Expand All @@ -66,18 +66,20 @@
from .non_linear.grid.sensitivity import Sensitivity
from .non_linear.initializer import InitializerBall
from .non_linear.initializer import InitializerPrior
from .non_linear.initializer import SpecificRangeInitializer
from .non_linear.initializer import InitializerParamBounds
from .non_linear.initializer import InitializerParamStartPoints
from .non_linear.search.mcmc.auto_correlations import AutoCorrelationsSettings
from .non_linear.search.mcmc.emcee.search import Emcee
from .non_linear.search.mcmc.zeus.search import Zeus
from .non_linear.search.nest.nautilus.search import Nautilus
from .non_linear.search.nest.dynesty.search.dynamic import DynestyDynamic
from .non_linear.search.nest.dynesty.search.static import DynestyStatic
from .non_linear.search.nest.ultranest.search import UltraNest
from .non_linear.search.optimize.drawer.search import Drawer
from .non_linear.search.optimize.lbfgs.search import LBFGS
from .non_linear.search.optimize.pyswarms.search.globe import PySwarmsGlobal
from .non_linear.search.optimize.pyswarms.search.local import PySwarmsLocal
from .non_linear.search.mle.drawer.search import Drawer
from .non_linear.search.mle.bfgs.search import BFGS
from .non_linear.search.mle.bfgs.search import LBFGS
from .non_linear.search.mle.pyswarms.search.globe import PySwarmsGlobal
from .non_linear.search.mle.pyswarms.search.local import PySwarmsLocal
from .non_linear.paths.abstract import AbstractPaths
from .non_linear.paths import DirectoryPaths
from .non_linear.paths import DatabasePaths
Expand Down Expand Up @@ -132,4 +134,4 @@ def save_abc(pickler, obj):

conf.instance.register(__file__)

__version__ = "2024.07.16.1"
__version__ = "2024.9.21.2"
2 changes: 1 addition & 1 deletion autofit/config/non_linear/README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -6,4 +6,4 @@ Files

- ``mcmc.yaml``: Settings default behaviour of MCMC non-linear searches (e.g. Emcee).
- ``nest.yaml``: Settings default behaviour of nested sampler non-linear searches (e.g. Dynesty).
- ``optimizer.yaml``: Settings default behaviour of optimizer non-linear searches (e.g. PySwarms).
- ``mle.yaml``: Settings default behaviour of maximum likelihood estimator (mle) searches (e.g. PySwarms).
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
# Configuration files that customize the default behaviour of non-linear searches.

# **PyAutoFit** supports the following optimizer algorithms:
# **PyAutoFit** supports the following maximum likelihood estimator (MLE) algorithms:

# - PySwarms: https://github.com/ljvmiranda921/pyswarms / https://pyswarms.readthedocs.io/en/latest/index.html

# Settings in the [search], [run] and [options] entries are specific to each nested algorithm and should be
# determined by consulting that optimizers method's own readthedocs.
# determined by consulting that method's own readthedocs.

PySwarmsGlobal:
run:
Expand Down Expand Up @@ -49,6 +49,44 @@ PySwarmsLocal:
updates:
iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results.
remove_state_files_at_end: true # Whether to remove the savestate of the search (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable).
BFGS:
search:
tol: null
options:
disp: false
eps: 1.0e-08
ftol: 2.220446049250313e-09
gtol: 1.0e-05
iprint: -1 # scipy L-BFGS-B expects an integer; -1 means silent.
maxcor: 10
maxfun: 15000
maxiter: 15000
maxls: 20
initialize: # The method used to generate where walkers are initialized in parameter space {prior | ball}.
method: ball # priors: samples are initialized by randomly drawing from each parameter's prior. ball: samples are initialized by randomly drawing unit values from a narrow uniform distribution.
ball_lower_limit: 0.49 # The lower limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method.
ball_upper_limit: 0.51 # The upper limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method.
parallel:
number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing.
printing:
silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter.
updates:
iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results.
remove_state_files_at_end: true # Whether to remove the savestate of the search (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable).
Drawer:
search:
total_draws: 50
initialize: # The method used to generate where walkers are initialized in parameter space {prior | ball}.
method: ball # priors: samples are initialized by randomly drawing from each parameter's prior. ball: samples are initialized by randomly drawing unit values from a narrow uniform distribution.
ball_lower_limit: 0.49 # The lower limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method.
ball_upper_limit: 0.51 # The upper limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method.
parallel:
number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing.
printing:
silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter.
updates:
iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results.
remove_state_files_at_end: true # Whether to remove the savestate of the search (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable).
LBFGS:
search:
tol: null
Expand Down
5 changes: 3 additions & 2 deletions autofit/config/visualize/plots_search.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,6 @@ nest:
corner_anesthetic: true # Output corner figure (using anesthetic) during a non-linear search fit?
mcmc:
corner_cornerpy: true # Output corner figure (using corner.py) during a non-linear search fit?
optimize:
corner_cornerpy: true # Output corner figure (using corner.py) during a non-linear search fit?
mle:
subplot_parameters: true # Output a subplot of the best-fit parameters of the model?
log_likelihood_vs_iteration: true # Output a plot of the log likelihood versus iteration number?
3 changes: 2 additions & 1 deletion autofit/database/aggregator/aggregator.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import logging
from abc import ABC, abstractmethod
from sqlalchemy import text
from typing import Optional, List, Union, cast

from ..sqlalchemy_ import sa
Expand Down Expand Up @@ -370,7 +371,7 @@ def _fits_for_query(self, query: str) -> List[m.Fit]:
query
"""
logger.debug(f"Executing query: {query}")
fit_ids = {row[0] for row in self.session.execute(query)}
fit_ids = {row[0] for row in self.session.execute(text(query))}

logger.info(f"{len(fit_ids)} fit(s) found matching query")
query = self.session.query(m.Fit).filter(m.Fit.id.in_(fit_ids))
Expand Down
85 changes: 18 additions & 67 deletions autofit/database/migration/migration.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,13 @@
import logging
from abc import ABC, abstractmethod
from hashlib import md5
from sqlalchemy import text
from typing import Union, Generator, Iterable, Optional

from .session_wrapper import SessionWrapper
from ..sqlalchemy_ import sa

logger = logging.getLogger(
__name__
)
logger = logging.getLogger(__name__)


class Identifiable(ABC):
Expand All @@ -19,22 +18,13 @@ def id(self) -> str:
A unique identifier generated by hashing a string
"""

def __eq__(
self,
other: Union["Identifiable", str]
) -> bool:
def __eq__(self, other: Union["Identifiable", str]) -> bool:
"""
Compares ids
"""
if isinstance(
other,
Identifiable
):
if isinstance(other, Identifiable):
return self.id == other.id
if isinstance(
other,
str
):
if isinstance(other, str):
return self.id == other
return False

Expand All @@ -57,13 +47,7 @@ def id(self) -> str:
"""
Hash generated from underlying SQL statements
"""
return md5(
":".join(
self.strings
).encode(
"utf-8"
)
).hexdigest()
return md5(":".join(self.strings).encode("utf-8")).hexdigest()

def __str__(self):
return "\n".join(self.strings)
Expand All @@ -72,10 +56,7 @@ def __str__(self):


class Revision(Identifiable):
def __init__(
self,
steps: Iterable[Step]
):
def __init__(self, steps: Iterable[Step]):
"""
A specific revision of the database. This comprises
a set of sequential steps and is uniquely identified
Expand All @@ -95,12 +76,7 @@ def id(self) -> str:
A unique identifier created by joining and hashing the
identifiers of comprised steps.
"""
return md5(
":".join(
step.id for step
in self.steps
).encode("utf-8")
).hexdigest()
return md5(":".join(step.id for step in self.steps).encode("utf-8")).hexdigest()

def __sub__(self, other: "Revision") -> "Revision":
"""
Expand All @@ -121,17 +97,11 @@ def __sub__(self, other: "Revision") -> "Revision":
An object comprising steps required to move from the other
revision to this revision.
"""
return Revision(tuple(
step for step in self.steps
if step not in other.steps
))
return Revision(tuple(step for step in self.steps if step not in other.steps))


class Migrator:
def __init__(
self,
*steps: Step
):
def __init__(self, *steps: Step):
"""
Manages migration of an old database.
Expand All @@ -153,14 +123,9 @@ def revisions(self) -> Generator[Revision, None, None]:
starting on the first step and terminating on any step
"""
for i in range(1, len(self._steps) + 1):
yield Revision(
self._steps[:i]
)
yield Revision(self._steps[:i])

def get_steps(
self,
revision_id: Optional[str] = None
) -> Iterable[Step]:
def get_steps(self, revision_id: Optional[str] = None) -> Iterable[Step]:
"""
Retrieve steps required to go from the specified
revision to the latest revision.
Expand Down Expand Up @@ -188,9 +153,7 @@ def latest_revision(self) -> Revision:
The latest revision according to the steps passed to the
Migrator
"""
return Revision(
self._steps
)
return Revision(self._steps)

def migrate(self, session: sa.orm.Session):
"""
Expand All @@ -207,19 +170,11 @@ def migrate(self, session: sa.orm.Session):
session
A session pointing at some database.
"""
wrapper = SessionWrapper(
session
)
wrapper = SessionWrapper(session)
revision_id = wrapper.revision_id
steps = list(
self.get_steps(
revision_id
)
)
steps = list(self.get_steps(revision_id))
if len(steps) == 0:
logger.info(
"Database already at latest revision"
)
logger.info("Database already at latest revision")
return

latest_revision_id = self.latest_revision.id
Expand All @@ -230,14 +185,10 @@ def migrate(self, session: sa.orm.Session):
for step in steps:
for string in step.strings:
try:
session.execute(
string
)
session.execute(text(string))
except sa.exc.OperationalError as e:
logger.debug(e)

wrapper.revision_id = self.latest_revision.id

logger.info(
f"revision_id updated to {wrapper.revision_id}"
)
logger.info(f"revision_id updated to {wrapper.revision_id}")
Loading

0 comments on commit 8a6f2e1

Please sign in to comment.