Source code for bumps.parameter

# This program is public domain
# Author: Paul Kienzle
"""
Fitting parameter objects.

Parameters are a big part of the interface between the model and the fitting
engine.  By saving and retrieving values and ranges from the parameter, the
fitting engine does not need to be aware of the structure of the model.

Users can also perform calculations with parameters, tying together different
parts of the model, or different models.
"""
#__all__ = [ 'Parameter']
import operator
import sys
from six.moves import reduce
import warnings
from copy import copy
import math
from functools import wraps

import numpy as np
from numpy import inf, isinf, isfinite

from . import bounds as mbounds

# TODO: avoid evaluation of subexpressions if parameters do not change.
# This is especially important if the subexpression invokes an expensive
# calculation via a parameterized function.  This will require a restructuring
# of the parameter class.  The park-1.3 solution is viable: given a parameter
# set, figure out which order the expressions need to be evaluated by
# building up a dependency graph.  With a little care, we can check which
# parameters have actually changed since the last calculation update, and
# restrict the dependency graph to just them.
# TODO: support full aliasing, so that floating point model attributes can
# be aliased to a parameter.  The same technique as subexpressions applies:
# when the parameter is changed, the model will be updated and will need
# to be re-evaluated.

# TODO: maybe move this to util?
def to_dict(p):
    if hasattr(p, 'to_dict'):
        return p.to_dict()
    elif isinstance(p, (tuple, list)):
        return [to_dict(v) for v in p]
    elif isinstance(p, dict):
        return {k: to_dict(v) for k, v in p.items()}
    elif isinstance(p, (bool, str, float, int, type(None))):
        return p
    elif isinstance(p, np.ndarray):
        # TODO: what about inf, nan and object arrays?
        return p.tolist()
    elif False and callable(p):
        # TODO: consider including functions and arbitrary values
        import base64
        import dill
        encoding = base64.encodebytes(dill.dumps(p)).decode('ascii')
        return {'type': 'dill', 'value': str(p), 'encoding': encoding}
        ## To recover the function
        # if allow_unsafe_code:
        #     encoding = item['encoding']
        #     p = dill.loads(base64.decodebytes(encoding).encode('ascii'))
    else:
        #print(f"converting type {type(p)} to str")
        return str(p)

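# Hedged example of the helper above: to_dict() walks containers and calls an
# object's own to_dict() when available, falling back to str() for anything it
# does not recognize.  Illustrative sketch only.
#
#     p = Parameter(3, name='x')
#     to_dict({'x': p, 'scale': 2.5})
#     # -> {'x': {'type': 'Parameter', 'name': 'x', 'value': 3, ...}, 'scale': 2.5}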
class BaseParameter(object):
    """
    Root of the parameter class, defining arithmetic on parameters
    """
    # Parameters are fixed unless told otherwise
    fixed = True
    fittable = False
    discrete = False
    _bounds = mbounds.Unbounded()
    name = None
    value = None  # value is an attribute of the derived class

    # Parameters may be dependent on other parameters, and the
    # fit engine will need to access them.
    def parameters(self):
        return [self]

    def pmp(self, plus, minus=None, limits=None):
        """
        Allow the parameter to vary as value +/- percent.

        pmp(*percent*) -> [value*(1-percent/100), value*(1+percent/100)]

        pmp(*plus*, *minus*) -> [value*(1+minus/100), value*(1+plus/100)]

        In the *plus/minus* form, one of the numbers should be plus and the
        other minus, but it doesn't matter which.

        If *limits* are provided, bound the end points of the range to lie
        within the limits.

        The resulting range is converted to "nice" numbers.
        """
        bounds = mbounds.pmp(self.value, plus, minus, limits=limits)
        self.bounds = mbounds.Bounded(*bounds)
        return self

    def pm(self, plus, minus=None, limits=None):
        """
        Allow the parameter to vary as value +/- delta.

        pm(*delta*) -> [value-delta, value+delta]

        pm(*plus*, *minus*) -> [value+minus, value+plus]

        In the *plus/minus* form, one of the numbers should be plus and the
        other minus, but it doesn't matter which.

        If *limits* are provided, bound the end points of the range to lie
        within the limits.

        The resulting range is converted to "nice" numbers.
        """
        bounds = mbounds.pm(self.value, plus, minus, limits=limits)
        self.bounds = mbounds.Bounded(*bounds)
        return self

    def dev(self, std, mean=None, limits=None, sigma=None, mu=None):
        """
        Allow the parameter to vary according to a normal distribution, with
        deviations from the mean added to the overall cost function for the
        model.

        If *mean* is None, then it defaults to the current parameter value.

        If *limits* are provided, then use a truncated normal distribution.

        Note: *sigma* and *mu* have been replaced by *std* and *mean*, but
        are left in for backward compatibility.
        """
        if sigma is not None or mu is not None:
            # CRUFT: remove sigma and mu parameters
            warnings.warn(DeprecationWarning(
                "use std,mean instead of mu,sigma in Parameter.dev"))
            if sigma is not None:
                std = sigma
            if mu is not None:
                mean = mu
        if mean is None:
            mean = self.value  # Note: value is an attribute of the derived class
        if limits is None:
            self.bounds = mbounds.Normal(mean, std)
        else:
            self.bounds = mbounds.BoundedNormal(mean, std, limits)
        return self

    def pdf(self, dist):
        """
        Allow the parameter to vary according to any continuous scipy.stats
        distribution.
        """
        self.bounds = mbounds.Distribution(dist)
        return self

    def range(self, low, high):
        """
        Allow the parameter to vary within the given range.
        """
        self.bounds = mbounds.init_bounds((low, high))
        return self

    def soft_range(self, low, high, std):
        """
        Allow the parameter to vary within the given range, or with Gaussian
        probability, stray from the range.
        """
        self.bounds = mbounds.SoftBounded(low, high, std)
        return self

    @property
    def bounds(self):
        """Fit bounds"""
        # print "getting bounds for",self,self._bounds
        return self._bounds

    @bounds.setter
    def bounds(self, b):
        # print "setting bounds for",self
        if self.fittable:
            self.fixed = (b is None)
        self._bounds = b

    # Functional form of parameter value access
    def __call__(self):
        return self.value

    # Parameter algebra: express relationships between parameters
    def __gt__(self, other):
        return Constraint(self, other, "GT", ">")

    def __ge__(self, other):
        return Constraint(self, other, "GE", ">=")

    def __le__(self, other):
        return Constraint(self, other, "LE", "<=")

    def __lt__(self, other):
        return Constraint(self, other, "LT", "<")

    # def __eq__(self, other):
    #     return ConstraintEQ(self, other)
    # def __ne__(self, other):
    #     return ConstraintNE(self, other)

    def __add__(self, other):
        return Operator(self, other, "add", "+")

    def __sub__(self, other):
        return Operator(self, other, "sub", "-")

    def __mul__(self, other):
        return Operator(self, other, "mul", "*")

    def __div__(self, other):
        return Operator(self, other, "truediv", "/")

    def __pow__(self, other):
        return Operator(self, other, "pow", "**")

    def __radd__(self, other):
        return Operator(other, self, "add", "+")

    def __rsub__(self, other):
        return Operator(other, self, "sub", "-")

    def __rmul__(self, other):
        return Operator(other, self, "mul", "*")

    def __rdiv__(self, other):
        return Operator(other, self, "truediv", "/")

    def __rpow__(self, other):
        return Operator(other, self, "pow", "**")

    def __abs__(self):
        return _abs(self)

    def __neg__(self):
        return self * -1

    def __pos__(self):
        return self

    def __float__(self):
        return float(self.value)

    __truediv__ = __div__
    __rtruediv__ = __rdiv__

    def nllf(self):
        """
        Return -log(P) for the current parameter value.
        """
        return self.bounds.nllf(self.value)

    def residual(self):
        """
        Return the z score equivalent for the current parameter value.

        That is, given the value of the parameter in the underlying
        distribution, find the equivalent value in the standard normal.
        For a gaussian, this is the z score, in which you subtract the
        mean and divide by the standard deviation to get the number of
        sigmas away from the mean.  For other distributions, you need to
        compute the cdf of value in the parameter distribution and invert
        it using the ppf from the standard normal distribution.
        """
        return self.bounds.residual(self.value)

    def valid(self):
        """
        Return true if the parameter is within the valid range.
        """
        return not isinf(self.nllf())

    def format(self):
        """
        Format the parameter, value and range as a string.
        """
        return "%s=%g in %s" % (self, self.value, self.bounds)

    def __str__(self):
        name = self.name if self.name is not None else '?'
        return name

    def __repr__(self):
        return "Parameter(%s)" % self

    def to_dict(self):
        """
        Return a dict representation of the object.
        """
        # When reconstructing a model from json we will need to tie parameters
        # together that were tied before.  This can be done by managing a
        # cache of allocated parameters indexed by id, and pulling from that
        # cache on reconstruction if the id already exists, otherwise create
        # a new entry.  Conveniently, this will handle free variable references
        # in parameter sets as well.  Note that the entire parameter description
        # will be repeated each time it occurs, but there should be few
        # enough of these that it isn't a problem.
        # TODO: use id that is stable from session to session.
        # TODO: have mechanism for clearing cache between save/load.
        return dict(
            type=type(self).__name__,
            id=id(self),  # Warning: this will be different every session
            name=self.name,
            value=self.value,
            fixed=self.fixed,
            fittable=self.fittable,
            bounds=to_dict(self._bounds),
        )

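# Hedged sketch of the parameter algebra defined above: arithmetic on
# parameters builds Operator trees, and comparisons build Constraint objects,
# both of which evaluate lazily.  Illustrative only.
#
#     a, b = Parameter(1, name='a'), Parameter(2, name='b')
#     total = a + b        # Operator; total.value == 3.0
#     check = a < b        # Constraint; bool(check) is True
#     a.value = 5
#     # total.value is now 7.0 and bool(check) is False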
class Constant(BaseParameter):
    """
    An unmodifiable value.
    """
    fittable = False
    fixed = True

    @property
    def value(self):
        return self._value

    def __init__(self, value, name=None):
        self._value = value
        self.name = name

# to_dict() can inherit from BaseParameter
class Parameter(BaseParameter):
    """
    A parameter is a symbolic value.

    It can be fixed or it can vary within bounds.

    p = Parameter(3).pmp(10)    # 3 +/- 10%
    p = Parameter(3).pmp(-5,10) # 3 in [2.85,3.3] rounded to 2 digits
    p = Parameter(3).pm(2)      # 3 +/- 2
    p = Parameter(3).pm(-1,2)   # 3 in [2,5]
    p = Parameter(3).range(0,5) # 3 in [0,5]

    It has hard limits on the possible values, and a range that should live
    within those hard limits.  The value should lie within the range for it
    to be valid, though some algorithms may drive the value outside the range
    in order to satisfy soft constraints.

    Other properties can decorate the parameter, such as tip for tool tip
    and units for units.
    """
    fittable = True

    @classmethod
    def default(cls, value, **kw):
        """
        Create a new parameter with the *value* and *kw* attributes, or return
        the existing parameter if *value* is already a parameter.

        The attributes are the same as those for Parameter, or whatever
        subclass *cls* of Parameter is being created.
        """
        # Need to constrain the parameter to fit within fixed limits and
        # to receive a name if a name has not already been provided.
        if isinstance(value, BaseParameter):
            return value
        else:
            return cls(value, **kw)

    def set(self, value):
        """
        Set a new value for the parameter, ignoring the bounds.
        """
        self.value = value

    def clip_set(self, value):
        """
        Set a new value for the parameter, clipping it to the bounds.
        """
        low, high = self.bounds.limits
        self.value = min(max(value, low), high)

    def __init__(self, value=None, bounds=None, fixed=None, name=None, **kw):
        # UI niceties:
        # 1. check if we are started with value=range or bounds=range; if we
        #    are given bounds, then assume this is a fitted parameter, otherwise
        #    the parameter defaults to fixed; if value is not set, use the
        #    midpoint of the range.
        if bounds is None:
            try:
                lo, hi = value
                warnings.warn(DeprecationWarning(
                    "parameters can no longer be initialized with a fit range"))
                bounds = lo, hi
                value = None
            except TypeError:
                pass
        if fixed is None:
            fixed = (bounds is None)
        bounds = mbounds.init_bounds(bounds)
        if value is None:
            value = bounds.start_value()

        # Store whatever values the user needs to associate with the parameter
        # Models should set units and tool tips so the user interface has
        # something to work with.
        limits = kw.get('limits', (-inf, inf))
        for k, v in kw.items():
            setattr(self, k, v)

        # Initialize bounds, with limits clipped to the hard limits for the
        # parameter
        def clip(x, a, b):
            return min(max(x, a), b)
        self.bounds = bounds
        self.bounds.limits = (clip(self.bounds.limits[0], *limits),
                              clip(self.bounds.limits[1], *limits))
        self.value = value
        self.fixed = fixed
        self.name = name

    def randomize(self, rng=None):
        """
        Set a random value for the parameter.
        """
        self.value = self.bounds.random(rng if rng is not None else mbounds.RNG)

    def feasible(self):
        """
        Value is within the limits defined by the model
        """
        return self.bounds.limits[0] <= self.value <= self.bounds.limits[1]

# to_dict() can inherit from BaseParameter
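# Hedged usage sketch for Parameter (illustrative only; names are made up):
#
#     width = Parameter(10, name="width", units="nm")  # extra kw become attributes
#     width.pm(2)          # allow the fit to vary width as 10 +/- 2
#     width.fixed          # now False, since a fit range has been set
#     width.clip_set(100)  # assignment clipped to the current bounds
#     width.randomize()    # draw a new value from within the bounds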
class Reference(Parameter):
    """
    Create an adaptor so that a model attribute can be treated as if it
    were a parameter.  This allows only direct access, wherein the
    storage for the parameter value is provided by the underlying model.

    Indirect access, wherein the storage is provided by the parameter, cannot
    be supported since the parameter has no way to detect that the model
    is asking for the value of the attribute.  This means that model
    attributes cannot be assigned to parameter expressions without some
    trigger to update the values of the attributes in the model.
    """
    def __init__(self, obj, attr, **kw):
        self.obj = obj
        self.attr = attr
        kw.setdefault('name', ".".join([obj.__class__.__name__, attr]))
        Parameter.__init__(self, **kw)

    @property
    def value(self):
        return getattr(self.obj, self.attr)

    @value.setter
    def value(self, value):
        setattr(self.obj, self.attr, value)

    def to_dict(self):
        ret = Parameter.to_dict(self)
        ret["attr"] = self.attr
        # TODO: another impossibility---an arbitrary python object
        # Clearly we need a (safe??) json pickler to handle the full
        # complexity of an arbitrary model.
        ret["obj"] = to_dict(self.obj)
        return ret

class ParameterSet(object):
    """
    A parameter that depends on the model.
    """
    def __init__(self, reference, names=None):
        """
        Create a parameter set, with one parameter for each model name.

        *names* is the list of model names.

        *reference* is the underlying :class:`parameter.Parameter` that will
        be set when the model is selected.

        *parameters* will be created, with one parameter per model.
        """
        self.names = names
        self.reference = reference
        # TODO: explain better why parameters are using np.array
        # Force numpy semantics on slice operations by using an array
        # of objects rather than a list of objects
        self.parameters = np.array([copy(reference) for _ in names])
        # print self.reference, self.parameters
        for p, n in zip(self.parameters, names):
            p.name = " ".join((n, p.name))
        # Reference is no longer directly fittable
        self.reference.fittable = False

    def to_dict(self):
        return {
            "type": "ParameterSet",
            "names": self.names,
            "reference": to_dict(self.reference),
            # Note: parameters are stored in a numpy array
            "parameters": to_dict(self.parameters.tolist()),
        }

    # Make the parameter set act like a list
    def __getitem__(self, i):
        """
        Return the underlying parameter for the model index.  Index can
        either be an integer or a model name.  It can also be a slice,
        in which case a new parameter set is returned.
        """
        # Try looking up the free variable by model name rather than model
        # index. If this fails, assume index is a model index.
        try:
            i = self.names.index(i)
        except ValueError:
            pass
        if isinstance(i, slice):
            obj = copy(self)
            obj.names = self.names[i]
            obj.reference = self.reference
            obj.parameters = self.parameters[i]
            return obj
        return self.parameters[i]

    def __setitem__(self, i, v):
        """
        Set the underlying parameter for the model index.  Index can
        either be an integer or a model name.  It can also be a slice,
        in which case all underlying parameters are set, either to the
        same value if *v* is a single parameter, otherwise *v* must have
        the same length as the slice.
        """
        try:
            i = self.names.index(i)
        except ValueError:
            pass
        self.parameters[i] = v

    def __iter__(self):
        return iter(self.parameters)

    def __len__(self):
        return len(self.parameters)

    def set_model(self, index):
        """
        Set the underlying model parameter to the value of the nth model.
        """
        self.reference.value = self.parameters[index].value

    def get_model(self, index):
        """
        Get the reference and underlying model parameter for the nth model.
        """
        return (id(self.reference), self.parameters[index])

    @property
    def values(self):
        return [p.value for p in self.parameters]

    @values.setter
    def values(self, values):
        for p, v in zip(self.parameters, values):
            p.value = v

    def range(self, *args, **kw):
        """
        Like :meth:`Parameter.range`, but applied to all models.
        """
        for p in self.parameters:
            p.range(*args, **kw)

    def pm(self, *args, **kw):
        """
        Like :meth:`Parameter.pm`, but applied to all models.
        """
        for p in self.parameters:
            p.pm(*args, **kw)

    def pmp(self, *args, **kw):
        """
        Like :meth:`Parameter.pmp`, but applied to all models.
        """
        for p in self.parameters:
            p.pmp(*args, **kw)

class FreeVariables(object):
    """
    A collection of parameter sets for a group of models.

    *names* is the set of model names.

    The parameters themselves are specified as key=value pairs, with key
    being the attribute name which is used to retrieve the parameter set
    and value being a :class:`Parameter` containing the parameter that is
    shared between the models.

    In order to evaluate the log likelihood of all models simultaneously,
    the fitting program will need to call set_model with the model index
    for each model in turn in order to substitute the values from the free
    variables into the model.  This allows us to share a common sample
    across multiple data sets, with each dataset having its own values for
    some of the sample parameters.  The alternative is to copy the entire
    sample structure, sharing references to common parameters and creating
    new parameters for each model for the free parameters.  Setting up
    these copies was inconvenient.
    """
    def __init__(self, names=None, **kw):
        if names is None:
            raise TypeError("FreeVariables needs name=[model1, model2, ...]")
        self.names = names
        # Create slots to hold the free variables
        self._parametersets = dict((k, ParameterSet(v, names=names))
                                   for k, v in kw.items())

    # Shouldn't need explicit __getstate__/__setstate__ but mpi4py pickle
    # chokes without it.
    def __getstate__(self):
        return self.__dict__

    def __setstate__(self, state):
        self.__dict__ = state

    def __getattr__(self, k):
        """
        Return the parameter set for the given free parameter.
        """
        try:
            return self._parametersets[k]
        except KeyError:
            raise AttributeError('FreeVariables has no attribute %r' % k)

    def parameters(self):
        """
        Return the set of free variables for all the models.
        """
        return dict((k, v.parameters) for k, v in self._parametersets.items())

    def to_dict(self):
        return {
            'type': type(self).__name__,
            'names': self.names,
            'parameters': to_dict(self._parametersets),
        }

    def set_model(self, i):
        """
        Set the reference parameters for model *i*.
        """
        for p in self._parametersets.values():
            p.set_model(i)

    def get_model(self, i):
        """
        Get the parameters for model *i* as {reference: substitution}
        """
        return dict(p.get_model(i) for p in self._parametersets.values())

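# Hedged sketch of FreeVariables in use (illustrative names): a single shared
# parameter gets a per-model copy, and the selected model's value is pushed
# into the shared reference before evaluation.
#
#     background = Parameter(0.0, name="background")
#     fv = FreeVariables(names=["data1", "data2"], background=background)
#     fv.background.range(0, 1e-4)   # fit range applied to every per-model copy
#     fv.set_model(0)                # push model 0's value into *background*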
# Current implementation computes values on the fly, so you only
# need to plug the values into the parameters and the parameters
# are automatically updated.
#
# This will not work well for wrapped models. In those cases you
# want to do a number of optimizations, such as only updating the
#

# ==== Comparison operators ===
COMPARISONS = [
    ('GT', '>'),
    ('GE', '>='),
    ('LE', '<='),
    ('LT', '<'),
    ('EQ', '=='),
    ('NE', '!='),
]

class Constraint(object):
    def __init__(self, a, b, op_name, op_str=""):
        import operator
        self.a, self.b = a, b
        self.op_name = op_name
        self.op = getattr(operator, op_name.lower())
        self.op_str = op_str

    def __bool__(self):
        return self.op(float(self.a), float(self.b))
    __nonzero__ = __bool__

    def __str__(self):
        return "(%s %s %s)" % (self.a, self.op_str, self.b)


# ==== Arithmetic operators ===
ALLOWED_OPERATORS = ["add", "sub", "mul", "truediv", "floordiv", "pow"]


class Operator(BaseParameter):
    """
    Parameter operator
    """
    def __init__(self, a, b, op_name, op_str):
        import operator
        if not op_name.lower() in ALLOWED_OPERATORS:
            raise ValueError("Operator name %s is not in allowed operators: %s"
                             % (op_name, str(ALLOWED_OPERATORS)))
        self.a, self.b = a, b
        self.op_name = op_name
        self.op = getattr(operator, op_name.lower())
        self.op_str = op_str
        pars = []
        if isinstance(a, BaseParameter):
            pars += a.parameters()
        if isinstance(b, BaseParameter):
            pars += b.parameters()
        self._parameters = pars
        self.name = str(self)

    def parameters(self):
        return self._parameters

    def to_dict(self):
        return dict(
            type="Operator",
            op_name=self.op_name,
            op_str=self.op_str,
            left=to_dict(self.a),
            right=to_dict(self.b),
        )

    @property
    def value(self):
        return self.op(float(self.a), float(self.b))

    @property
    def dvalue(self):
        return float(self.a)

    def __str__(self):
        return "(%s %s %s)" % (self.a, self.op_str, self.b)

def substitute(a):
    """
    Return structure a with values substituted for all parameters.

    The function traverses lists, tuples and dicts recursively.  Things
    which are not parameters are returned directly.
    """
    if isinstance(a, BaseParameter):
        return float(a.value)
    elif isinstance(a, tuple):
        return tuple(substitute(v) for v in a)
    elif isinstance(a, list):
        return [substitute(v) for v in a]
    elif isinstance(a, dict):
        return dict((k, substitute(v)) for k, v in a.items())
    elif isinstance(a, np.ndarray):
        return np.array([substitute(v) for v in a])
    else:
        return a

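# Hedged example for substitute(): parameters inside nested containers are
# replaced by their floating point values; everything else passes through.
#
#     p = Parameter(2, name='p')
#     substitute({'scale': p, 'offset': 1.5})   # -> {'scale': 2.0, 'offset': 1.5}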
class Function(BaseParameter):
    """
    Delayed function evaluator.

    f.value evaluates the function with the values of the parameter
    arguments at the time f.value is referenced rather than when the
    function was invoked.
    """
    __slots__ = ['op', 'args', 'kw']

    def __init__(self, op, *args, **kw):
        self.name = kw.pop('name', None)
        self.op, self.args, self.kw = op, args, kw
        self._parameters = self._find_parameters()

    def _find_parameters(self):
        # Figure out which arguments to the function are parameters
        #deps = [p for p in self.args if isinstance(p,BaseParameter)]
        args = [arg for arg in self.args if isinstance(arg, BaseParameter)]
        kw = dict((name, arg) for name, arg in self.kw.items()
                  if isinstance(arg, BaseParameter))
        deps = flatten((args, kw))
        # Find out which other parameters these parameters depend on.
        res = []
        for p in deps:
            res.extend(p.parameters())
        return res

    def parameters(self):
        return self._parameters

    def _value(self):
        # Expand args and kw, replacing instances of parameters
        # with their values
        return self.op(*substitute(self.args), **substitute(self.kw))
    value = property(_value)

    def to_dict(self):
        return {
            "type": "Function",
            "name": self.name,
            # TODO: function not stored properly in json
            "op": to_dict(self.op),
            "args": to_dict(self.args),
            "kw": to_dict(self.kw),
        }

    def __getstate__(self):
        return self.name, self.op, self.args, self.kw

    def __setstate__(self, state):
        self.name, self.op, self.args, self.kw = state
        self._parameters = self._find_parameters()

    def __str__(self):
        if self.name is not None:
            name = self.name
        else:
            args = [str(v) for v in self.args]
            kw = [str(k) + "=" + str(v) for k, v in self.kw.items()]
            name = self.op.__name__ + "(" + ", ".join(args + kw) + ")"
        return name
        #return "%s:%g" % (name, self.value)

def function(op):
    """
    Convert a function into a delayed evaluator.

    The value of the function is computed from the values of the parameters
    at the time that the function value is requested rather than when the
    function is created.
    """
    # Note: @functools.wraps(op) does not work with numpy ufuncs
    # Note: @decorator does not work with builtins like abs
    def function_generator(*args, **kw):
        return Function(op, *args, **kw)
    function_generator.__name__ = op.__name__
    function_generator.__doc__ = op.__doc__
    return function_generator

_abs = function(abs)

# Numpy trick: math functions from numpy delegate to the math function of
# the class if that function exists as a class attribute.
BaseParameter.exp = function(math.exp)
BaseParameter.expm1 = function(math.expm1)
BaseParameter.log = function(math.log)
BaseParameter.log10 = function(math.log10)
BaseParameter.log1p = function(math.log1p)
BaseParameter.sqrt = function(math.sqrt)
BaseParameter.degrees = function(math.degrees)
BaseParameter.radians = function(math.radians)
BaseParameter.sin = function(math.sin)
BaseParameter.cos = function(math.cos)
BaseParameter.tan = function(math.tan)
BaseParameter.arcsin = function(math.asin)
BaseParameter.arccos = function(math.acos)
BaseParameter.arctan = function(math.atan)
BaseParameter.sinh = function(math.sinh)
BaseParameter.cosh = function(math.cosh)
BaseParameter.tanh = function(math.tanh)
BaseParameter.arcsinh = function(math.asinh)
BaseParameter.arccosh = function(math.acosh)
BaseParameter.arctanh = function(math.atanh)
BaseParameter.ceil = function(math.ceil)
BaseParameter.floor = function(math.floor)
BaseParameter.trunc = function(math.trunc)

def boxed_function(f):
    box = function(f)

    @wraps(f)
    def wrapped(*args, **kw):
        if any(isinstance(v, BaseParameter) for v in args):
            return box(*args, **kw)
        else:
            return f(*args, **kw)
    return wrapped

# arctan2 is special since either argument can be a parameter
arctan2 = boxed_function(math.atan2)

# Trig functions defined in degrees rather than radians
@boxed_function
def cosd(v):
    """Return the cosine of x (measured in degrees)."""
    return math.cos(math.radians(v))

@boxed_function
def sind(v):
    """Return the sine of x (measured in degrees)."""
    return math.sin(math.radians(v))

@boxed_function
def tand(v):
    """Return the tangent of x (measured in degrees)."""
    return math.tan(math.radians(v))

@boxed_function
def acosd(v):
    """Return the arc cosine (measured in degrees) of x."""
    return math.degrees(math.acos(v))

arccosd = acosd
@boxed_function
def asind(v):
    """Return the arc sine (measured in degrees) of x."""
    return math.degrees(math.asin(v))

arcsind = asind
@boxed_function
def atand(v):
    """Return the arc tangent (measured in degrees) of x."""
    return math.degrees(math.atan(v))

arctand = atand
@boxed_function
def atan2d(dy, dx):
    """Return the arc tangent (measured in degrees) of y/x.
    Unlike atan(y/x), the signs of both x and y are considered."""
    return math.degrees(math.atan2(dy, dx))

arctan2d = atan2d
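# Hedged sketch of the delayed evaluators above: wrapped math functions return
# Function objects when given parameters, so the result tracks the parameter.
# Illustrative only.
#
#     angle = Parameter(60, name="angle")
#     x = cosd(angle)      # Function; x.value == cos(60 degrees) == 0.5
#     y = angle.sin()      # math.sin applied lazily (argument in radians)
#     cosd(60.)            # plain float input -> plain float result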
def flatten(s):
    if isinstance(s, (tuple, list, np.ndarray)):
        return reduce(lambda a, b: a + flatten(b), s, [])
    elif isinstance(s, set):
        raise TypeError("parameter flattening cannot order sets")
    elif isinstance(s, dict):
        return reduce(lambda a, b: a + flatten(s[b]), sorted(s.keys()), [])
    elif isinstance(s, BaseParameter):
        return [s]
    elif s is None:
        return []
    else:
        raise TypeError("don't understand type %s for %r" % (type(s), s))

def format(p, indent=0, freevars={}, field=None):
    """
    Format parameter set for printing.

    Note that this only says how the parameters are arranged, not how they
    relate to each other.
    """
    p = freevars.get(id(p), p)
    if isinstance(p, dict) and p != {}:
        res = []
        for k in sorted(p.keys()):
            if k.startswith('_'):
                continue
            s = format(p[k], indent + 2, field=k, freevars=freevars)
            label = " " * indent + "." + k
            if s.endswith('\n'):
                res.append(label + "\n" + s)
            else:
                res.append(label + " = " + s + '\n')
        if '_index' in p:
            res.append(format(p['_index'], indent, freevars=freevars))
        return "".join(res)
    elif isinstance(p, (list, tuple, np.ndarray)) and len(p):
        res = []
        for k, v in enumerate(p):
            s = format(v, indent + 2, freevars=freevars)
            label = " " * indent + "[%d]" % k
            if s.endswith('\n'):
                res.append(label + '\n' + s)
            else:
                res.append(label + ' = ' + s + '\n')
        return "".join(res)
    elif isinstance(p, Parameter):
        s = ""
        if str(p) != field:
            s += str(p) + " = "
        s += "%g" % p.value
        if not p.fixed:
            s += " in [%g,%g]" % tuple(p.bounds.limits)
        return s
    elif isinstance(p, BaseParameter):
        return "%s = %g" % (str(p), p.value)
    else:
        return "None"

def summarize(pars, sorted=False):
    """
    Return a stylized list of parameter names and values with range bars
    suitable for printing.

    If sorted, then print the parameters sorted alphabetically by name.
    """
    output = []
    if sorted:
        # Note: the *sorted* flag shadows the builtin, so sort a copy in place.
        pars = list(pars)
        pars.sort(key=lambda p: p.name)
    for p in pars:
        if not isfinite(p.value):
            bar = ["*invalid* "]
        else:
            position = int(p.bounds.get01(p.value) * 9.999999999)
            bar = ['.'] * 10
            if position < 0:
                bar[0] = '<'
            elif position > 9:
                bar[9] = '>'
            else:
                bar[position] = '|'
        output.append("%40s %s %10g in %s"
                      % (p.name, "".join(bar), p.value, p.bounds))
    return "\n".join(output)

def unique(s):
    """
    Return the unique set of parameters

    The ordering is stable.  The same parameters/dependencies will always
    return the same ordering, with the first occurrence first.
    """
    # Walk structures such as dicts and lists
    pars = flatten(s)
    # print "====== flattened"
    # print "\n".join("%s:%s"%(id(p),p) for p in pars)
    # Also walk parameter expressions
    pars = pars + flatten([p.parameters() for p in pars])
    # print "====== extended"
    # print "\n".join("%s:%s"%(id(p),p) for p in pars)

    # TODO: implement n log n rather than n^2 uniqueness algorithm
    # problem is that the sorting has to be unique across a pickle.
    result = []
    for p in pars:
        if not any(p is q for q in result):
            result.append(p)
    # print "====== unique"
    # print "\n".join("%s:%s"%(id(p),p) for p in result)

    # Return the complete set of parameters
    return result

def fittable(s):
    """
    Return the list of fittable parameters in no particular order.

    Note that some fittable parameters may be fixed during the fit.
    """
    return [p for p in unique(s) if p.fittable]

def varying(s):
    """
    Return the list of fitted parameters in the model.

    This is the set of parameters that will vary during the fit.
    """
    return [p for p in unique(s) if not p.fixed]


def randomize(s):
    """
    Set random values to the parameters in the parameter set, with
    values chosen according to the bounds.
    """
    for p in s:
        p.value = p.bounds.random(1)[0]


def current(s):
    return [p.value for p in s]

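# Hedged sketch tying the helpers above together (illustrative names):
#
#     pars = {'a': Parameter(1, name='a').range(0, 2),
#             'b': Parameter(3, name='b')}
#     unique(pars)     # stable, de-duplicated list of parameters
#     varying(pars)    # just [a], since b has no fit range
#     print(summarize(varying(pars)))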
# ========= trash ===================
class IntegerParameter(Parameter):
    discrete = True

    def _get_value(self):
        return self._value

    def _set_value(self, value):
        self._value = int(value)
    value = property(_get_value, _set_value)

class Alias(object):
    """
    Parameter alias.

    Rather than modifying a model to contain a parameter slot,
    allow the parameter to exist outside the model.  The resulting
    parameter will have the full parameter semantics, including
    the ability to replace a fixed value with a parameter expression.

    **Deprecated** :class:`Reference` does this better.
    """
    def __init__(self, obj, attr, p=None, name=None):
        self.obj = obj
        self.attr = attr
        if name is None:
            name = ".".join([obj.__class__.__name__, attr])
        self.p = Parameter.default(p, name=name)

    def update(self):
        setattr(self.obj, self.attr, self.p.value)

    def parameters(self):
        return self.p.parameters()

    def to_dict(self):
        return {
            'type': type(self).__name__,
            'p': to_dict(self.p),
            # TODO: can't json pickle arbitrary objects
            'obj': to_dict(self.obj),
            'attr': self.attr,
        }


def test_operator():
    a = Parameter(1, name='a')
    b = Parameter(2, name='b')
    a_b = a + b
    a.value = 3
    assert a_b.value == 5.
    assert a_b.name == '(a + b)'