From 6478823cf9570ba439ff550a1ce137b13847e792 Mon Sep 17 00:00:00 2001 From: Takuya Iwanaga Date: Wed, 24 Oct 2018 00:08:47 +1100 Subject: [PATCH 1/3] autopep8 --- active_subspaces/domains.py | 218 +++++++++---------- active_subspaces/gradients.py | 24 +-- active_subspaces/integrals.py | 86 ++++---- active_subspaces/optimizers.py | 82 ++++---- active_subspaces/response_surfaces.py | 103 +++++---- active_subspaces/subspaces.py | 219 ++++++++++---------- active_subspaces/utils/designs.py | 12 +- active_subspaces/utils/misc.py | 99 +++++---- active_subspaces/utils/plotters.py | 44 ++-- active_subspaces/utils/qp_solver.py | 42 ++-- active_subspaces/utils/quadrature.py | 46 ++-- active_subspaces/utils/response_surfaces.py | 118 +++++------ active_subspaces/utils/simrunners.py | 52 ++--- tests/test_as_optimizers.py | 2 +- tests/test_domains.py | 16 +- tests/test_gradients.py | 16 +- tests/test_qp_solver.py | 1 - tests/test_quadrature.py | 2 +- tests/test_response_surfaces.py | 4 +- tests/test_subspaces.py | 30 +-- 20 files changed, 603 insertions(+), 613 deletions(-) diff --git a/active_subspaces/domains.py b/active_subspaces/domains.py index fda1fe8..1458229 100644 --- a/active_subspaces/domains.py +++ b/active_subspaces/domains.py @@ -8,24 +8,24 @@ class ActiveVariableDomain(): """A base class for the domain of functions of active variables. - + Attributes ---------- subspaces : Subspaces subspaces that define the domain - m : int + m : int the dimension of the simulation inputs - n : int + n : int the dimension of the active subspace - vertY : ndarray - n-dimensional vertices that define the boundary of the domain when the + vertY : ndarray + n-dimensional vertices that define the boundary of the domain when the m-dimensional space is a hypercube - vertX : ndarray + vertX : ndarray corners of the m-dimensional hypercube that map to the points `vertY` convhull : scipy.spatial.ConvexHull the ConvexHull object defined by the vertices `vertY` constraints : dict - a dictionary of linear inequality constraints conforming to the + a dictionary of linear inequality constraints conforming to the specifications used in the scipy.optimizer library Notes @@ -40,7 +40,7 @@ class ActiveVariableDomain(): class UnboundedActiveVariableDomain(ActiveVariableDomain): """Domain of functions with unbounded domains (Gaussian weight). - + An class for the domain of functions of active variables when the space of simulation parameters is unbounded. @@ -54,7 +54,7 @@ def __init__(self, subspaces): Parameters ---------- - subspaces : Subspaces + subspaces : Subspaces a Subspaces object with the `compute` method already called """ if not isinstance(subspaces, Subspaces): @@ -68,7 +68,7 @@ def __init__(self, subspaces): class BoundedActiveVariableDomain(ActiveVariableDomain): """Domain of functions with bounded domains (uniform on hypercube). - + An class for the domain of functions of active variables when the space of simulation parameters is bounded. @@ -80,10 +80,10 @@ class BoundedActiveVariableDomain(ActiveVariableDomain): def __init__(self, subspaces): """Initialize the BoundedActiveVariableDomain - + Parameters ---------- - subspaces : Subspaces + subspaces : Subspaces a Subspaces object with the `compute` method already called """ if not isinstance(subspaces, Subspaces): @@ -145,7 +145,7 @@ class ActiveVariableMap(): def __init__(self, domain): """Initialize the ActiveVariableMap. 
- + Parameters ---------- domain : ActiveVariableDomain @@ -155,23 +155,23 @@ def __init__(self, domain): def forward(self, X): """Map full variables to active variables. - + Map the points in the original input space to the active and inactive variables. Parameters ---------- X : ndarray - an M-by-m matrix, each row of `X` is a point in the original + an M-by-m matrix, each row of `X` is a point in the original parameter space Returns ------- - Y : ndarray + Y : ndarray M-by-n matrix that contains points in the space of active variables. Each row of `Y` corresponds to a row of `X`. - Z : ndarray - M-by-(m-n) matrix that contains points in the space of inactive + Z : ndarray + M-by-(m-n) matrix that contains points in the space of inactive variables. Each row of `Z` corresponds to a row of `X`. """ @@ -182,25 +182,25 @@ def forward(self, X): def inverse(self, Y, N=1): """Find points in full space that map to active variable points. - + Map the points in the active variable space to the original parameter space. - + Parameters ---------- Y : ndarray M-by-n matrix that contains points in the space of active variables N : int, optional - the number of points in the original parameter space that are + the number of points in the original parameter space that are returned that map to the given active variables (default 1) Returns ------- X : ndarray - (M*N)-by-m matrix that contains points in the original parameter + (M*N)-by-m matrix that contains points in the original parameter space ind : ndarray - (M*N)-by-1 matrix that contains integer indices. These indices + (M*N)-by-1 matrix that contains integer indices. These indices identify which rows of `X` map to which rows of `Y`. Notes @@ -220,24 +220,24 @@ def inverse(self, Y, N=1): def regularize_z(self, Y, N): """Pick inactive variables associated active variables. - + Find points in the space of inactive variables to complete the inverse map. - + Parameters ---------- Y : ndarray M-by-n matrix that contains points in the space of active variables N : int - The number of points in the original parameter space that are + The number of points in the original parameter space that are returned that map to the given active variables Returns ------- - Z : ndarray - (M)-by-(m-n)-by-N matrix that contains values of the inactive + Z : ndarray + (M)-by-(m-n)-by-N matrix that contains values of the inactive variables - + Notes ----- The base class does not implement `regularize_z`. Specific @@ -249,7 +249,7 @@ def regularize_z(self, Y, N): class BoundedActiveVariableMap(ActiveVariableMap): """Class for mapping between active and bounded full variables. - + A class for the map between active/inactive and original variables when the original variables are bounded by a hypercube with a uniform density. @@ -259,24 +259,24 @@ class BoundedActiveVariableMap(ActiveVariableMap): """ def regularize_z(self, Y, N): """Pick inactive variables associated active variables. - + Find points in the space of inactive variables to complete the inverse map. 
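As context for the `forward` and `inverse` methods documented here: once the m-by-m eigenvector matrix W is partitioned into W1 (m-by-n) and W2 (m-by-(m-n)), the forward map is nothing more than two matrix products. A minimal sketch with stand-in data (the orthogonal W below is hypothetical, not a trained subspace):

    import numpy as np

    m, n, M = 5, 2, 100
    W = np.linalg.qr(np.random.normal(size=(m, m)))[0]   # stand-in orthogonal eigenvectors
    W1, W2 = W[:, :n], W[:, n:]

    X = np.random.uniform(-1.0, 1.0, size=(M, m))        # points in the full input space
    Y = np.dot(X, W1)                                    # active variables, M-by-n
    Z = np.dot(X, W2)                                    # inactive variables, M-by-(m-n)

Recovering X from the pair (Y, Z) is the rotation X = [Y Z] W^T, which is what `_rotate_x` at the bottom of this module computes.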
- + Parameters ---------- Y : ndarray M-by-n matrix that contains points in the space of active variables N : int - The number of points in the original parameter space that are + The number of points in the original parameter space that are returned that map to the given active variables Returns ------- - Z : ndarray - (M)-by-(m-n)-by-N matrix that contains values of the inactive + Z : ndarray + (M)-by-(m-n)-by-N matrix that contains values of the inactive variables - + Notes ----- This implementation of `regularize_z` uses the function `sample_z` to @@ -287,18 +287,18 @@ def regularize_z(self, Y, N): m, n = W1.shape # sample the z's - # TODO: preallocate and organize properly - + # TODO: preallocate and organize properly + Zlist = [] for y in Y: Zlist.append(sample_z(N, y, W1, W2)) - + Z = np.swapaxes(np.array(Zlist),1,2) return Z class UnboundedActiveVariableMap(ActiveVariableMap): """Class for mapping between active and unbounded full variables. - + A class for the map between active/inactive and original variables when the original variables are ubbounded and the space is equipped with a standard Gaussian density. @@ -309,24 +309,24 @@ class UnboundedActiveVariableMap(ActiveVariableMap): """ def regularize_z(self, Y, N): """Pick inactive variables associated active variables. - + Find points in the space of inactive variables to complete the inverse map. - + Parameters ---------- Y : ndarray M-by-n matrix that contains points in the space of active variables N : int - The number of points in the original parameter space that are + The number of points in the original parameter space that are returned that map to the given active variables Returns ------- - Z : ndarray - (M)-by-(m-n)-by-N matrix that contains values of the inactive + Z : ndarray + (M)-by-(m-n)-by-N matrix that contains values of the inactive variables - + Notes ----- This implementation of `regularize_z` samples the inactive variables @@ -341,19 +341,19 @@ def regularize_z(self, Y, N): def nzv(m, n): """Number of zonotope vertices. - + Compute the number of zonotope vertices for a linear map from R^m to R^n. - + Parameters ---------- m : int the dimension of the hypercube n : int the dimension of the low-dimesional subspace - + Returns ------- - N : int + N : int the number of vertices defining the zonotope """ if not isinstance(m, int): @@ -371,20 +371,20 @@ def nzv(m, n): def interval_endpoints(W1): """Compute the range of a 1d active variable. 
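The count documented for `nzv` matches the classical formula for the number of vertices of the image of the m-cube under a generic rank-n linear map, N = 2 * sum_{k=0}^{n-1} C(m-1, k). The body of `nzv` is not part of this hunk, so the sketch below is the standard count rather than the library's literal code:

    from scipy.special import comb

    def nzv_sketch(m, n):
        # vertices of the zonotope {W1^T x : x in [-1, 1]^m} for generic W1
        return 2 * sum(comb(m - 1, k, exact=True) for k in range(n))

    print(nzv_sketch(5, 2))   # 10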
- + Parameters ---------- W1 : ndarray - m-by-1 matrix that contains the eigenvector that defines the first + m-by-1 matrix that contains the eigenvector that defines the first active variable Returns ------- Y : ndarray - 2-by-1 matrix that contains the endpoints of the interval defining the + 2-by-1 matrix that contains the endpoints of the interval defining the range of the 1d active variable X : ndarray - 2-by-m matrix that contains the corners of the m-dimensional hypercube + 2-by-m matrix that contains the corners of the m-dimensional hypercube that map to the active variable endpoints """ @@ -402,17 +402,17 @@ def interval_endpoints(W1): def unique_rows(S): """Return the unique rows from ndarray - + Parameters ---------- S : ndarray array with rows to reduces - + Returns ------- T : ndarray version of `S` with unique rows - + Notes ----- http://stackoverflow.com/questions/16970982/find-unique-rows-in-numpy-array @@ -425,8 +425,8 @@ def zonotope_vertices(W1, Nsamples=10000, maxcount=100000): Parameters ---------- - W1 : ndarray - m-by-n matrix that contains the eigenvector bases of the n-dimensional + W1 : ndarray + m-by-n matrix that contains the eigenvector bases of the n-dimensional active subspace Nsamples : int, optional number of samples per iteration to check (default 1e4) @@ -435,16 +435,16 @@ def zonotope_vertices(W1, Nsamples=10000, maxcount=100000): Returns ------- - Y : ndarray + Y : ndarray nzv-by-n matrix that contains the zonotope vertices - X : ndarray + X : ndarray nzv-by-m matrix that contains the corners of the m-dimensional hypercube that map to the zonotope vertices """ m, n = W1.shape totalverts = nzv(m,n) - + # make sure they're ints Nsamples = int(Nsamples) maxcount = int(maxcount) @@ -454,7 +454,7 @@ def zonotope_vertices(W1, Nsamples=10000, maxcount=100000): X = unique_rows(np.sign(np.dot(Z, W1.transpose()))) X = unique_rows(np.vstack((X, -X))) N = X.shape[0] - + count = 0 while N < totalverts: Z = np.random.normal(size=(Nsamples, n)) @@ -465,38 +465,38 @@ def zonotope_vertices(W1, Nsamples=10000, maxcount=100000): count += 1 if count > maxcount: break - + numverts = X.shape[0] if totalverts > numverts: print 'Warning: {} of {} vertices found.'.format(numverts, totalverts) - + Y = np.dot(X, W1) return Y.reshape((numverts, n)), X.reshape((numverts, m)) - + def sample_z(N, y, W1, W2): """Sample inactive variables. - + Sample values of the inactive variables for a fixed value of the active variables when the original variables are bounded by a hypercube. 
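The sampling loop in `zonotope_vertices` above exploits a simple fact: for a random direction z in R^n, the hypercube corner that maximizes the linear functional x -> (W1 z)^T x is sign(W1 z), and every vertex of the zonotope is the image of such a corner. A condensed restatement of that loop, using numpy's row-wise unique (assumes numpy >= 1.13):

    import numpy as np

    def zonotope_vertices_sketch(W1, nsamples=10000):
        m, n = W1.shape
        Z = np.random.normal(size=(nsamples, n))
        X = np.unique(np.sign(np.dot(Z, W1.T)), axis=0)   # corners found so far
        X = np.unique(np.vstack((X, -X)), axis=0)         # vertices come in +/- pairs
        return np.dot(X, W1), X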
Parameters ---------- - N : int + N : int the number of inactive variable samples - y : ndarray + y : ndarray the value of the active variables - W1 : ndarray - m-by-n matrix that contains the eigenvector bases of the n-dimensional + W1 : ndarray + m-by-n matrix that contains the eigenvector bases of the n-dimensional active subspace - W2 : ndarray - m-by-(m-n) matrix that contains the eigenvector bases of the + W2 : ndarray + m-by-(m-n) matrix that contains the eigenvector bases of the (m-n)-dimensional inactive subspace Returns ------- Z : ndarray - N-by-(m-n) matrix that contains values of the inactive variable that + N-by-(m-n) matrix that contains values of the inactive variable that correspond to the given `y` Notes @@ -534,30 +534,30 @@ def hit_and_run_z(N, y, W1, W2): Parameters ---------- - N : int + N : int the number of inactive variable samples - y : ndarray + y : ndarray the value of the active variables - W1 : ndarray - m-by-n matrix that contains the eigenvector bases of the n-dimensional + W1 : ndarray + m-by-n matrix that contains the eigenvector bases of the n-dimensional active subspace - W2 : ndarray - m-by-(m-n) matrix that contains the eigenvector bases of the + W2 : ndarray + m-by-(m-n) matrix that contains the eigenvector bases of the (m-n)-dimensional inactive subspace Returns ------- Z : ndarray - N-by-(m-n) matrix that contains values of the inactive variable that - correspond to the given `y` - + N-by-(m-n) matrix that contains values of the inactive variable that + correspond to the given `y` + See Also -------- domains.sample_z - + Notes ----- - The interface for this implementation is written specifically for + The interface for this implementation is written specifically for `domains.sample_z`. """ m, n = W1.shape @@ -626,30 +626,30 @@ def rejection_sampling_z(N, y, W1, W2): Parameters ---------- - N : int + N : int the number of inactive variable samples - y : ndarray + y : ndarray the value of the active variables - W1 : ndarray - m-by-n matrix that contains the eigenvector bases of the n-dimensional + W1 : ndarray + m-by-n matrix that contains the eigenvector bases of the n-dimensional active subspace - W2 : ndarray - m-by-(m-n) matrix that contains the eigenvector bases of the + W2 : ndarray + m-by-(m-n) matrix that contains the eigenvector bases of the (m-n)-dimensional inactive subspace Returns ------- Z : ndarray - N-by-(m-n) matrix that contains values of the inactive variable that - correspond to the given `y` - + N-by-(m-n) matrix that contains values of the inactive variable that + correspond to the given `y` + See Also -------- domains.sample_z - + Notes ----- - The interface for this implementation is written specifically for + The interface for this implementation is written specifically for `domains.sample_z`. 
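All three samplers in this module target the same set: z is feasible for a fixed y exactly when x = W1 y + W2 z stays in the hypercube, that is, -1 - W1 y <= W2 z <= 1 - W1 y componentwise. A deliberately naive rejection sketch of that set (the samplers above and below use much smarter proposals; note |z_i| <= sqrt(m) bounds the feasible region):

    import numpy as np

    def naive_z_sample(N, y, W1, W2):
        m, n = W1.shape
        base = np.dot(W1, y.reshape((n, 1)))                   # m-by-1
        Z = []
        while len(Z) < N:
            z = np.random.uniform(-np.sqrt(m), np.sqrt(m), size=(m - n, 1))
            if np.all(np.fabs(base + np.dot(W2, z)) <= 1.0):   # x in [-1, 1]^m
                Z.append(z.ravel())
        return np.array(Z)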
""" m, n = W1.shape @@ -682,30 +682,30 @@ def random_walk_z(N, y, W1, W2): Parameters ---------- - N : int + N : int the number of inactive variable samples - y : ndarray + y : ndarray the value of the active variables - W1 : ndarray - m-by-n matrix that contains the eigenvector bases of the n-dimensional + W1 : ndarray + m-by-n matrix that contains the eigenvector bases of the n-dimensional active subspace - W2 : ndarray - m-by-(m-n) matrix that contains the eigenvector bases of the + W2 : ndarray + m-by-(m-n) matrix that contains the eigenvector bases of the (m-n)-dimensional inactive subspace Returns ------- Z : ndarray - N-by-(m-n) matrix that contains values of the inactive variable that - correspond to the given `y` - + N-by-(m-n) matrix that contains values of the inactive variable that + correspond to the given `y` + See Also -------- domains.sample_z - + Notes ----- - The interface for this implementation is written specifically for + The interface for this implementation is written specifically for `domains.sample_z`. """ m, n = W1.shape @@ -745,12 +745,12 @@ def random_walk_z(N, y, W1, W2): def _rotate_x(Y, Z, W): """A convenience function for rotating subspace coordinates to x space. - + """ NY, n = Y.shape N = Z.shape[2] m = n + Z.shape[1] - + YY = np.tile(Y.reshape((NY, n, 1)), (1, 1, N)) YZ = np.concatenate((YY, Z), axis=1).transpose((1, 0, 2)).reshape((m, N*NY)).transpose((1, 0)) X = np.dot(YZ, W.T).reshape((N*NY,m)) diff --git a/active_subspaces/gradients.py b/active_subspaces/gradients.py index a137f70..5e692d2 100644 --- a/active_subspaces/gradients.py +++ b/active_subspaces/gradients.py @@ -5,28 +5,28 @@ def local_linear_gradients(X, f, p=None, weights=None): """Estimate a collection of gradients from input/output pairs. - + Given a set of input/output pairs, choose subsets of neighboring points and build a local linear model for each subset. The gradients of these local linear models comprise estimates of sampled gradients. 
Parameters ---------- - X : ndarray + X : ndarray M-by-m matrix that contains the m-dimensional inputs - f : ndarray + f : ndarray M-by-1 matrix that contains scalar outputs p : int, optional - how many nearest neighbors to use when constructing the local linear + how many nearest neighbors to use when constructing the local linear model (default 1) weights : ndarray, optional - M-by-1 matrix that contains the weights for each observation (default + M-by-1 matrix that contains the weights for each observation (default None) Returns ------- df : ndarray - M-by-m matrix that contains estimated partial derivatives approximated + M-by-m matrix that contains estimated partial derivatives approximated by the local linear models Notes @@ -44,7 +44,7 @@ def local_linear_gradients(X, f, p=None, weights=None): if p < m+1 or p > M: raise Exception('p must be between m+1 and M') - + if weights is None: weights = np.ones((M, 1)) / M @@ -67,18 +67,18 @@ def finite_difference_gradients(X, fun, h=1e-6): Parameters ---------- - X : ndarray - M-by-m matrix that contains the points to estimate the gradients with + X : ndarray + M-by-m matrix that contains the points to estimate the gradients with finite differences fun : function function that returns the simulation's quantity of interest given inputs - h : float, optional + h : float, optional the finite difference step size (default 1e-6) Returns ------- - df : ndarray - M-by-m matrix that contains estimated partial derivatives approximated + df : ndarray + M-by-m matrix that contains estimated partial derivatives approximated by finite differences """ X, M, m = process_inputs(X) diff --git a/active_subspaces/integrals.py b/active_subspaces/integrals.py index 9c23709..c2ce2fa 100644 --- a/active_subspaces/integrals.py +++ b/active_subspaces/integrals.py @@ -12,30 +12,30 @@ def integrate(fun, avmap, N, NMC=10): """Approximate the integral of a function of m variables. 
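For reference, the forward-difference estimate that `finite_difference_gradients` implements, written out as a standalone sketch (fun takes a 1-by-m ndarray and returns a scalar, as elsewhere in this library):

    import numpy as np

    def fd_gradients_sketch(fun, X, h=1e-6):
        # df[i, j] ~ (f(x_i + h e_j) - f(x_i)) / h
        M, m = X.shape
        f0 = np.array([fun(x.reshape((1, m))) for x in X]).reshape((M, 1))
        df = np.zeros((M, m))
        for j in range(m):
            Xj = X.copy()
            Xj[:, j] += h
            fj = np.array([fun(x.reshape((1, m))) for x in Xj]).reshape((M, 1))
            df[:, j] = ((fj - f0) / h).ravel()
        return df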
- + Parameters ---------- - fun : function - an interface to the simulation that returns the quantity of interest + fun : function + an interface to the simulation that returns the quantity of interest given inputs as an 1-by-m ndarray - avmap : ActiveVariableMap + avmap : ActiveVariableMap a domains.ActiveVariableMap - N : int + N : int the number of points in the quadrature rule NMC : int, optional - the number of points in the Monte Carlo estimates of the conditional + the number of points in the Monte Carlo estimates of the conditional expectation and conditional variance (default 10) Returns ------- - mu : float - an estimate of the integral of the function computed against the weight + mu : float + an estimate of the integral of the function computed against the weight function on the simulation inputs - lb : float - a central-limit-theorem 95% lower confidence from the Monte Carlo part + lb : float + a central-limit-theorem 95% lower confidence from the Monte Carlo part of the integration - ub : float - a central-limit-theorem 95% upper confidence from the Monte Carlo part + ub : float + a central-limit-theorem 95% upper confidence from the Monte Carlo part of the integration See Also @@ -84,16 +84,16 @@ def av_integrate(avfun, avmap, N): Parameters ---------- - avfun : function + avfun : function a function of the active variables - avmap : ActiveVariableMap + avmap : ActiveVariableMap a domains.ActiveVariableMap - N : int + N : int the number of points in the quadrature rule Returns ------- - mu : float + mu : float an estimate of the integral Notes @@ -117,27 +117,27 @@ def av_integrate(avfun, avmap, N): def quadrature_rule(avmap, N, NMC=10): """Get a quadrature rule on the space of simulation inputs. - + Parameters ---------- - avmap : ActiveVariableMap + avmap : ActiveVariableMap a domains.ActiveVariableMap - N : int + N : int the number of quadrature nodes in the active variables - NMC : int, optional - the number of samples in the simple Monte Carlo over the inactive + NMC : int, optional + the number of samples in the simple Monte Carlo over the inactive variables (default 10) Returns ------- - Xp : ndarray - (N*NMC)-by-m matrix containing the quadrature nodes on the simulation + Xp : ndarray + (N*NMC)-by-m matrix containing the quadrature nodes on the simulation input space - Xw : ndarray - (N*NMC)-by-1 matrix containing the quadrature weights on the simulation + Xw : ndarray + (N*NMC)-by-1 matrix containing the quadrature weights on the simulation input space - ind : ndarray - array of indices identifies which rows of `Xp` correspond to the same + ind : ndarray + array of indices identifies which rows of `Xp` correspond to the same fixed value of the active variables See Also @@ -182,14 +182,14 @@ def av_quadrature_rule(avmap, N): Parameters ---------- - avmap : ActiveVariableMap + avmap : ActiveVariableMap a domains.ActiveVariableMap - N : int + N : int the number of quadrature nodes in the active variables - + Returns ------- - Yp : ndarray + Yp : ndarray quadrature nodes on the active variables Yw : ndarray quadrature weights on the active variables @@ -215,25 +215,25 @@ def av_quadrature_rule(avmap, N): def interval_quadrature_rule(avmap, N, NX=10000): """Quadrature rule on a one-dimensional interval. - + Quadrature when the dimension of the active subspace is 1 and the simulation parameter space is bounded. 
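The structure shared by `integrate` and `quadrature_rule` above, as a sketch: quadrature nodes in the active variables, simple Monte Carlo over the inactive ones, glued together by `avmap.inverse` and its index array. Here Yp and Yw are assumed to come from `av_quadrature_rule`:

    import numpy as np

    def integrate_sketch(fun, Yp, Yw, avmap, NMC=10):
        # mu ~ sum_k Yw[k] * mean_j fun(x_kj), where the x_kj are the NMC
        # inverse images of the k-th active-variable quadrature node
        X, ind = avmap.inverse(Yp, NMC)
        fX = np.array([fun(x.reshape((1, x.size))) for x in X]).reshape((-1, 1))
        mu = 0.0
        for k in range(Yp.shape[0]):
            mu += float(Yw[k]) * np.mean(fX[ind.ravel() == k])
        return mu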
Parameters ---------- - avmap : ActiveVariableMap + avmap : ActiveVariableMap a domains.ActiveVariableMap - N : int + N : int the number of quadrature nodes in the active variables - NX : int, optional + NX : int, optional the number of samples to use to estimate the quadrature weights (default 10000) Returns ------- - Yp : ndarray + Yp : ndarray quadrature nodes on the active variables - Yw : ndarray + Yw : ndarray quadrature weights on the active variables See Also @@ -261,17 +261,17 @@ def interval_quadrature_rule(avmap, N, NX=10000): def zonotope_quadrature_rule(avmap, N, NX=10000): """Quadrature rule on a zonotope. - + Quadrature when the dimension of the active subspace is greater than 1 and the simulation parameter space is bounded. Parameters ---------- - avmap : ActiveVariableMap + avmap : ActiveVariableMap a domains.ActiveVariableMap - N : int + N : int the number of quadrature nodes in the active variables - NX : int, optional + NX : int, optional the number of samples to use to estimate the quadrature weights (default 10000) @@ -279,7 +279,7 @@ def zonotope_quadrature_rule(avmap, N, NX=10000): ------- Yp : ndarray quadrature nodes on the active variables - Yw : ndarray + Yw : ndarray quadrature weights on the active variables See Also @@ -310,5 +310,3 @@ def zonotope_quadrature_rule(avmap, N, NX=10000): Yp, Yw = points.reshape((T.nsimplex,n)), weights.reshape((T.nsimplex,1)) return Yp, Yw - - diff --git a/active_subspaces/optimizers.py b/active_subspaces/optimizers.py index d91df65..c16c18c 100644 --- a/active_subspaces/optimizers.py +++ b/active_subspaces/optimizers.py @@ -9,7 +9,7 @@ class MinVariableMap(ActiveVariableMap): """ActiveVariableMap for optimization - + This subclass is an domains.ActiveVariableMap specifically for optimization. See Also @@ -30,11 +30,11 @@ def train(self, X, f): Parameters ---------- - X : ndarray - input points used to train a global quadratic used in the + X : ndarray + input points used to train a global quadratic used in the `regularize_z` function - f : ndarray - simulation outputs used to train a global quadratic in the + f : ndarray + simulation outputs used to train a global quadratic in the `regularize_z` function """ @@ -78,16 +78,16 @@ def regularize_z(self, Y, N=1): Parameters ---------- - Y : ndarray + Y : ndarray N-by-n matrix of points in the space of active variables N : int, optional - merely there satisfy the interface of `regularize_z`. It should not + merely there satisfy the interface of `regularize_z`. It should not be anything other than 1 Returns ------- - Z : ndarray - N-by-(m-n)-by-1 matrix that contains a value of the inactive + Z : ndarray + N-by-(m-n)-by-1 matrix that contains a value of the inactive variables for each value of the inactive variables Notes @@ -132,16 +132,16 @@ def regularize_z(self, Y, N=1): Parameters ---------- - Y : ndarray + Y : ndarray N-by-n matrix of points in the space of active variables N : int, optional - merely there satisfy the interface of `regularize_z`. It should not + merely there satisfy the interface of `regularize_z`. It should not be anything other than 1 Returns ------- - Z : ndarray - N-by-(m-n)-by-1 matrix that contains a value of the inactive + Z : ndarray + N-by-(m-n)-by-1 matrix that contains a value of the inactive variables for each value of the inactive variables Notes @@ -166,22 +166,22 @@ def regularize_z(self, Y, N=1): def minimize(asrs, X, f): """Minimize a response surface constructed with the active subspace. 
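Typical call pattern for the function below, per its docstring and notes: `asrs` is a trained ActiveSubspaceResponseSurface and X, f are the input/output pairs used to train it; internally the routine optimizes over the active variables and uses a MinVariableMap trained on (X, f) to carry the minimizer back to the full space.

    xstar, fstar = minimize(asrs, X, f)
    # xstar: ndarray, estimated minimizer in the original input space
    # fstar: float, estimated minimum of the modeled function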
- + Parameters ---------- - asrs : ActiveSubspaceResponseSurface + asrs : ActiveSubspaceResponseSurface a trained response_surfaces.ActiveSubspaceResponseSurface - X : ndarray + X : ndarray input points used to train the MinVariableMap - f : ndarray + f : ndarray simulation outputs used to train the MinVariableMap - + Returns ------- - xstar : ndarray + xstar : ndarray the estimated minimizer of the function modeled by the ActiveSubspaceResponseSurface `asrs` - fstar : float + fstar : float the estimated minimum of the function modeled by `asrs` Notes @@ -191,7 +191,7 @@ def minimize(asrs, X, f): a MinVariableMap with the given input/output pairs, which it uses to map the minimizer back to the space of simulation inputs. - This is very heuristic. + This is very heuristic. """ X, f, M, m = process_inputs_outputs(X, f) @@ -223,18 +223,18 @@ def av_minimize(avfun, avdom, avdfun=None): Parameters ---------- - avfun : function + avfun : function a function of the active variables - avdom : ActiveVariableDomain + avdom : ActiveVariableDomain information about the domain of `avfun` - avdfun : function + avdfun : function returns the gradient of `avfun` Returns ------- - ystar : ndarray + ystar : ndarray the estimated minimizer of `avfun` - fstar : float + fstar : float the estimated minimum of `avfun` See Also @@ -262,16 +262,16 @@ def interval_minimize(avfun, avdom): Parameters ---------- - avfun : function + avfun : function a function of the active variables - avdom : ActiveVariableDomain + avdom : ActiveVariableDomain contains information about the domain of `avfun` Returns ------- - ystar : ndarray + ystar : ndarray the estimated minimizer of `avfun` - fstar : float + fstar : float the estimated minimum of `avfun` See Also @@ -294,21 +294,21 @@ def interval_minimize(avfun, avdom): def zonotope_minimize(avfun, avdom, avdfun): """Minimize a response surface defined on a zonotope. 
- + Parameters ---------- - avfun : function + avfun : function a function of the active variables - avdom : ActiveVariableDomain + avdom : ActiveVariableDomain contains information about the domain of `avfun` - avdfun : function + avdfun : function returns the gradient of `avfun` Returns ------- - ystar : ndarray + ystar : ndarray the estimated minimizer of `avfun` - fstar : float + fstar : float the estimated minimum of `avfun` See Also @@ -350,18 +350,18 @@ def unbounded_minimize(avfun, avdom, avdfun): Parameters ---------- - avfun : function + avfun : function a function of the active variables - avdom : ActiveVariableDomain + avdom : ActiveVariableDomain contains information about the domain of `avfun` - avdfun : function + avdfun : function returns the gradient of `avfun` Returns ------- - ystar : ndarray + ystar : ndarray the estimated minimizer of `avfun` - fstar : float + fstar : float the estimated minimum of `avfun` See Also diff --git a/active_subspaces/response_surfaces.py b/active_subspaces/response_surfaces.py index b7e3e56..535606f 100644 --- a/active_subspaces/response_surfaces.py +++ b/active_subspaces/response_surfaces.py @@ -12,9 +12,9 @@ class ActiveSubspaceResponseSurface(): Attributes ---------- - respsurf : ResponseSurface + respsurf : ResponseSurface `respsurf` is a utils.response_surfaces.ResponseSurface - avmap : ActiveVariableMap + avmap : ActiveVariableMap a domains.ActiveVariableMap Notes @@ -31,12 +31,12 @@ def __init__(self, avmap, respsurf=None): Parameters ---------- - avmap : ActiveVariableMap - a domains.ActiveVariable map that includes the active variable + avmap : ActiveVariableMap + a domains.ActiveVariable map that includes the active variable domain, which includes the active and inactive subspaces respsurf : ResponseSurface, optional - a utils.response_surfaces.ResponseSurface object. If a - ResponseSurface is not given, a default RadialBasisApproximation is + a utils.response_surfaces.ResponseSurface object. If a + ResponseSurface is not given, a default RadialBasisApproximation is used. """ if not isinstance(avmap, ActiveVariableMap): @@ -50,7 +50,7 @@ def __init__(self, avmap, respsurf=None): def _train(self, Y, f, v=None): """Train the radial basis function approximation. - + A private function for training the response surface with a set of active variable and function evaluations. """ @@ -62,15 +62,15 @@ def _train(self, Y, f, v=None): def train_with_data(self, X, f, v=None): """Train the response surface with input/output pairs. 
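Pieced together from the constructors and methods in this file, domains.py, and subspaces.py, a hypothetical end-to-end fit; the top-level import layout is assumed, everything else follows the documented signatures:

    import numpy as np
    import active_subspaces as ac

    M, m = 300, 6
    X = np.random.uniform(-1.0, 1.0, size=(M, m))       # hypothetical inputs
    f = np.sum(X[:, :2]**2, axis=1).reshape((M, 1))     # toy quantity of interest

    ss = ac.subspaces.Subspaces()
    ss.compute(X=X, f=f, sstype='QPHD')                 # quadratic-model subspace estimate

    avdom = ac.domains.BoundedActiveVariableDomain(ss)
    avmap = ac.domains.BoundedActiveVariableMap(avdom)

    asrs = ac.response_surfaces.ActiveSubspaceResponseSurface(avmap)
    asrs.train_with_data(X, f)
    fhat = asrs.predict(X)[0]                           # response surface values at X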
- + Parameters ---------- - X : ndarray + X : ndarray M-by-m matrix with evaluations of the simulation inputs - f : ndarray + f : ndarray M-by-1 matrix with corresponding simulation quantities of interest v : ndarray, optional - M-by-1 matrix that contains the regularization (i.e., errors) + M-by-1 matrix that contains the regularization (i.e., errors) associated with `f` (default None) Notes @@ -89,14 +89,14 @@ def train_with_interface(self, fun, N, NMC=10): Parameters ---------- - fun : function - a function that returns the simulation quantity of interest given a + fun : function + a function that returns the simulation quantity of interest given a point in the input space as an 1-by-m ndarray - N : int - the number of points used in the design-of-experiments for + N : int + the number of points used in the design-of-experiments for constructing the response surface - NMC : int, optional - the number of points used to estimate the conditional expectation + NMC : int, optional + the number of points used to estimate the conditional expectation and conditional variance of the function given a value of the active variables @@ -132,25 +132,25 @@ def train_with_interface(self, fun, N, NMC=10): def predict_av(self, Y, compgrad=False): """Evaluate response surface at active variable. - + Compute the value of the response surface given values of the active variables. Parameters ---------- - Y : ndarray - M-by-n matrix containing points in the range of active variables to + Y : ndarray + M-by-n matrix containing points in the range of active variables to evaluate the response surface - compgrad : bool, optional - determines if the gradient of the response surface with respect to + compgrad : bool, optional + determines if the gradient of the response surface with respect to the active variables is computed and returned (default False) Returns ------- - f : ndarray + f : ndarray contains the response surface values at the given `Y` - df : ndarray - contains the response surface gradients at the given `Y`. If + df : ndarray + contains the response surface gradients at the given `Y`. If `compgrad` is False, then `df` is None. """ f, df = self.respsurf.predict(Y, compgrad) @@ -158,19 +158,19 @@ def predict_av(self, Y, compgrad=False): def gradient_av(self, Y): """Compute the gradient with respect to the active variables. - + A convenience function for computing the gradient of the response surface with respect to the active variables. Parameters ---------- - Y : ndarray - M-by-n matrix containing points in the range of active variables to + Y : ndarray + M-by-n matrix containing points in the range of active variables to evaluate the response surface gradient Returns ------- - df : ndarray + df : ndarray contains the response surface gradient at the given `Y` """ df = self.respsurf.predict(Y, compgrad=True)[1] @@ -178,24 +178,24 @@ def gradient_av(self, Y): def predict(self, X, compgrad=False): """Evaluate the response surface at full space points. - + Compute the value of the response surface given values of the simulation variables. 
Parameters ---------- - X : ndarray + X : ndarray M-by-m matrix containing points in simulation's parameter space - compgrad : bool, optional - determines if the gradient of the response surface is computed and + compgrad : bool, optional + determines if the gradient of the response surface is computed and returned (default False) - + Returns ------- - f : ndarray + f : ndarray contains the response surface values at the given `X` dfdx : ndarray - an ndarray of shape M-by-m that contains the estimated gradient at + an ndarray of shape M-by-m that contains the estimated gradient at the given `X`. If `compgrad` is False, then `dfdx` is None. """ Y = self.avmap.forward(X)[0] @@ -209,18 +209,18 @@ def predict(self, X, compgrad=False): def gradient(self, X): """Gradient of the response surface. - + A convenience function for computing the gradient of the response surface with respect to the simulation inputs. Parameters ---------- - X : ndarray + X : ndarray M-by-m matrix containing points in the space of simulation inputs Returns ------- - df : ndarray + df : ndarray contains the response surface gradient at the given `X` """ return self.predict(X, compgrad=True)[1] @@ -230,32 +230,32 @@ def __call__(self, X): def av_design(avmap, N, NMC=10): """Design on active variable space. - + A wrapper that returns the design for the response surface in the space of the active variables. Parameters ---------- - avmap : ActiveVariableMap - a domains.ActiveVariable map that includes the active variable domain, + avmap : ActiveVariableMap + a domains.ActiveVariable map that includes the active variable domain, which includes the active and inactive subspaces - N : int - the number of points used in the design-of-experiments for constructing + N : int + the number of points used in the design-of-experiments for constructing the response surface NMC : int, optional - the number of points used to estimate the conditional expectation and - conditional variance of the function given a value of the active + the number of points used to estimate the conditional expectation and + conditional variance of the function given a value of the active variables (Default is 10) Returns ------- - Y : ndarray - N-by-n matrix that contains the design points in the space of active + Y : ndarray + N-by-n matrix that contains the design points in the space of active variables - X : ndarray - (N*NMC)-by-m matrix that contains points in the simulation input space + X : ndarray + (N*NMC)-by-m matrix that contains points in the simulation input space to run the simulation - ind : ndarray + ind : ndarray indices that map points in `X` to points in `Y` See Also @@ -295,4 +295,3 @@ def av_design(avmap, N, NMC=10): X, ind = avmap.inverse(Y, NMC) return Y, X, ind - diff --git a/active_subspaces/subspaces.py b/active_subspaces/subspaces.py index 2cdfe70..6e47908 100644 --- a/active_subspaces/subspaces.py +++ b/active_subspaces/subspaces.py @@ -9,10 +9,10 @@ class Subspaces(): """A class for computing active and inactive subspaces. 
- + Attributes ---------- - eigenvals : ndarray + eigenvals : ndarray m-by-1 matrix of eigenvalues eigenvecs : ndarray m-by-m matrix, eigenvectors oriented column-wise @@ -23,10 +23,10 @@ class Subspaces(): e_br : ndarray m-by-2 matrix, bootstrap ranges for the eigenvalues sub_br : ndarray - m-by-3 matrix, bootstrap ranges (first and third column) and the - mean (second column) of the error in the estimated active subspace + m-by-3 matrix, bootstrap ranges (first and third column) and the + mean (second column) of the error in the estimated active subspace approximated by bootstrap - + Notes ----- The attributes `W1` and `W2` are convenience variables. They are identical @@ -39,27 +39,27 @@ class Subspaces(): def compute(self, X=None, f=None, df=None, weights=None, sstype='AS', ptype='EVG', nboot=0): """Compute the active and inactive subspaces. - - Given input points and corresponding outputs, or given samples of the + + Given input points and corresponding outputs, or given samples of the gradients, estimate an active subspace. This method has four different algorithms for estimating the active subspace: 'AS' is the standard active subspace that requires gradients, 'OLS' uses a global linear - model to estimate a one-dimensional active subspace, 'QPHD' uses a + model to estimate a one-dimensional active subspace, 'QPHD' uses a global quadratic model to estimate subspaces, and 'OPG' uses a set of local linear models computed from subsets of give input/output pairs. - - The function also sets the dimension of the active subspace (and, + + The function also sets the dimension of the active subspace (and, consequently, the dimenison of the inactive subspace). There are three heuristic choices for the dimension of the active subspace. The default is the largest gap in the eigenvalue spectrum, which is 'EVG'. The other - two choices are 'RS', which estimates the error in a low-dimensional - response surface using the eigenvalues and the estimated subspace - errors, and 'LI' which is a heuristic from Bing Li on order - determination. - + two choices are 'RS', which estimates the error in a low-dimensional + response surface using the eigenvalues and the estimated subspace + errors, and 'LI' which is a heuristic from Bing Li on order + determination. + Note that either `df` or `X` and `f` must be given, although formally all are optional. - + Parameters ---------- X : ndarray, optional @@ -73,26 +73,26 @@ def compute(self, X=None, f=None, df=None, weights=None, sstype='AS', ptype='EVG weights : ndarray, optional M-by-1 matrix of weights associated with rows of `X` sstype : str, optional - defines subspace type to compute. Default is 'AS' for active - subspace, which requires `df`. Other options are `OLS` for a global - linear model, `QPHD` for a global quadratic model, and `OPG` for + defines subspace type to compute. Default is 'AS' for active + subspace, which requires `df`. Other options are `OLS` for a global + linear model, `QPHD` for a global quadratic model, and `OPG` for local linear models. The latter three require `X` and `f`. ptype : str, optional - defines the partition type. Default is 'EVG' for largest + defines the partition type. Default is 'EVG' for largest eigenvalue gap. Other options are 'RS', which is an estimate of the response surface error, and 'LI', which is a heuristic proposed by Bing Li based on subspace errors and eigenvalue decay. 
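A gradient-based call matching the defaults documented above (`df` is a hypothetical M-by-m matrix of sampled gradients; nboot=100 also populates the bootstrap attributes e_br and sub_br):

    ss = Subspaces()
    ss.compute(df=df, sstype='AS', ptype='EVG', nboot=100)
    print(ss.W1.shape)   # m-by-n basis of the estimated active subspace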
nboot : int, optional - number of bootstrap samples used to estimate the error in the + number of bootstrap samples used to estimate the error in the estimated subspace (default 0 means no bootstrap estimates) - + Notes ----- Partition type 'RS' and 'LI' require nboot to be greater than 0 (and - probably something more like 100) to get bootstrap estimates of the - subspace error. + probably something more like 100) to get bootstrap estimates of the + subspace error. """ - + # Check inputs if X is not None: X, M, m = process_inputs(X) @@ -100,11 +100,11 @@ def compute(self, X=None, f=None, df=None, weights=None, sstype='AS', ptype='EVG df, M, m = process_inputs(df) else: raise Exception('One of input/output pairs (X,f) or gradients (df) must not be None') - + if weights is None: # default weights is for Monte Carlo weights = np.ones((M, 1)) / M - + # Compute the subspace if sstype == 'AS': if df is None: @@ -130,20 +130,20 @@ def compute(self, X=None, f=None, df=None, weights=None, sstype='AS', ptype='EVG e, W = None, None ssmethod = None raise Exception('Unrecognized subspace type: {}'.format(sstype)) - - self.eigenvals, self.eigenvecs = e, W - + + self.eigenvals, self.eigenvecs = e, W + # Compute bootstrap ranges and partition if nboot > 0: e_br, sub_br, li_F = _bootstrap_ranges(e, W, X, f, df, weights, ssmethod, nboot) else: if ptype == 'RS' or ptype == 'LI': raise Exception('Need to run bootstrap for partition type {}'.format(ptype)) - + e_br, sub_br = None, None - + self.e_br, self.sub_br = e_br, sub_br - + # Compute the partition if ptype == 'EVG': n = eig_partition(e)[0] @@ -154,21 +154,21 @@ def compute(self, X=None, f=None, df=None, weights=None, sstype='AS', ptype='EVG n = ladle_partition(e, li_F)[0] else: raise Exception('Unrecognized partition type: {}'.format(ptype)) - + self.partition(n) def partition(self, n): """Partition the eigenvectors to define the active subspace. - + A convenience function for partitioning the full set of eigenvectors to separate the active from inactive subspaces. - + Parameters ---------- n : int the dimension of the active subspace - + """ if not isinstance(n, int): raise TypeError('n should be an integer') @@ -181,7 +181,7 @@ def partition(self, n): def active_subspace(df, weights): """Compute the active subspace. - + Parameters ---------- df : ndarray @@ -189,7 +189,7 @@ def active_subspace(df, weights): weights : ndarray M-by-1 weight vector, corresponds to numerical quadrature rule used to estimate matrix whose eigenspaces define the active subspace - + Returns ------- e : ndarray @@ -198,15 +198,15 @@ def active_subspace(df, weights): m-by-m orthogonal matrix of eigenvectors """ df, M, m = process_inputs(df) - + # compute the matrix C = np.dot(df.transpose(), df * weights) - + return sorted_eigh(C) def ols_subspace(X, f, weights): """Estimate one-dimensional subspace with global linear model. 
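The one-liner C = np.dot(df.transpose(), df * weights) in `active_subspace` above is exactly the weighted sum of gradient outer products; a quick numerical check of that identity with Monte Carlo weights:

    import numpy as np

    M, m = 200, 4
    df = np.random.normal(size=(M, m))
    weights = np.ones((M, 1)) / M
    C = np.dot(df.transpose(), df * weights)                 # as in active_subspace
    C2 = sum(np.outer(df[i], df[i]) for i in range(M)) / M   # explicit outer products
    print(np.allclose(C, C2))                                # True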
- + Parameters ---------- X : ndarray @@ -216,42 +216,42 @@ def ols_subspace(X, f, weights): weights : ndarray M-by-1 weight vector, corresponds to numerical quadrature rule used to estimate matrix whose eigenspaces define the active subspace - + Returns ------- e : ndarray m-by-1 vector of eigenvalues W : ndarray m-by-m orthogonal matrix of eigenvectors - + Notes ----- Although the method returns a full set of eigenpairs (to be consistent with the other subspace functions), only the first eigenvalue will be nonzero, - and only the first eigenvector will have any relationship to the input + and only the first eigenvector will have any relationship to the input parameters. The remaining m-1 eigenvectors are only orthogonal to the first. """ X, f, M, m = process_inputs_outputs(X, f) - + # solve weighted least squares A = np.hstack((np.ones((M, 1)), X)) * np.sqrt(weights) b = f * np.sqrt(weights) u = np.linalg.lstsq(A, b)[0] w = u[1:].reshape((m, 1)) - + # compute rank-1 C C = np.dot(w, w.transpose()) - + return sorted_eigh(C) def qphd_subspace(X, f, weights): """Estimate active subspace with global quadratic model. - + This approach is similar to Ker-Chau Li's approach for principal Hessian directions based on a global quadratic model of the data. In contrast to Li's approach, this method uses the average outer product of the gradient of the quadratic model, as opposed to just its Hessian. - + Parameters ---------- X : ndarray @@ -261,7 +261,7 @@ def qphd_subspace(X, f, weights): weights : ndarray M-by-1 weight vector, corresponds to numerical quadrature rule used to estimate matrix whose eigenspaces define the active subspace - + Returns ------- e : ndarray @@ -271,13 +271,13 @@ def qphd_subspace(X, f, weights): """ X, f, M, m = process_inputs_outputs(X, f) - + # check if the points are uniform or Gaussian, set 2nd moment if np.amax(X) > 1.0 or np.amin < -1.0: gamma = 1.0 else: gamma = 1.0 / 3.0 - + # compute a quadratic approximation pr = PolynomialApproximation(2) pr.train(X, f, weights) @@ -289,14 +289,14 @@ def qphd_subspace(X, f, weights): C = np.outer(b, b.transpose()) + gamma*np.dot(A, A.transpose()) return sorted_eigh(C) - + def opg_subspace(X, f, weights): """Estimate active subspace with local linear models. - - This approach is related to the sufficient dimension reduction method known - sometimes as the outer product of gradient method. See the 2001 paper + + This approach is related to the sufficient dimension reduction method known + sometimes as the outer product of gradient method. See the 2001 paper 'Structure adaptive approach for dimension reduction' from Hristache, et al. - + Parameters ---------- X : ndarray @@ -306,7 +306,7 @@ def opg_subspace(X, f, weights): weights : ndarray M-by-1 weight vector, corresponds to numerical quadrature rule used to estimate matrix whose eigenspaces define the active subspace - + Returns ------- e : ndarray @@ -315,24 +315,24 @@ def opg_subspace(X, f, weights): m-by-m orthogonal matrix of eigenvectors """ X, f, M, m = process_inputs_outputs(X, f) - + # Obtain gradient approximations using local linear regressions df = local_linear_gradients(X, f, weights=weights) - + # Use gradient approximations to compute active subspace opg_weights = np.ones((df.shape[0], 1)) / df.shape[0] e, W = active_subspace(df, opg_weights) - + return e, W - + def eig_partition(e): """Partition the active subspace according to largest eigenvalue gap. 
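The body of `eig_partition` is not included in this hunk; the docstring's largest-gap criterion is commonly applied on the log scale, so the following is a plausible sketch, an assumption rather than the library's literal code (it also presumes strictly positive eigenvalues):

    import numpy as np

    def eig_partition_sketch(e):
        e = e.ravel()
        gaps = np.log(e[:-1]) - np.log(e[1:])   # decay between consecutive eigenvalues
        n = int(np.argmax(gaps)) + 1            # split just before the largest gap
        return n, gaps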
- + Parameters ---------- e : ndarray m-by-1 vector of eigenvalues - + Returns ------- n : int @@ -349,72 +349,72 @@ def eig_partition(e): def errbnd_partition(e, sub_err): """Partition the active subspace according to response surface error. - - Uses an a priori estimate of the response surface error based on the - eigenvalues and subspace error to determine the active subspace dimension. - + + Uses an a priori estimate of the response surface error based on the + eigenvalues and subspace error to determine the active subspace dimension. + Parameters ---------- e : ndarray m-by-1 vector of eigenvalues sub_err : ndarray m-by-1 vector of estimates of subspace error - - + + Returns ------- n : int dimension of active subspace errbnd : float estimate of error bound - + Notes ----- The error bound should not be used as an estimated error. The bound is only used to estimate the subspace dimension. - + """ m = e.shape[0] - + errbnd = np.zeros((m-1, 1)) for i in range(m-1): errbnd[i] = np.sqrt(np.sum(e[:i+1]))*sub_err[i] + np.sqrt(np.sum(e[i+1:])) - + n = np.argmin(errbnd) + 1 return n, errbnd def ladle_partition(e, li_F): """Partition the active subspace according to Li's criterion. - + Uses a criterion proposed by Bing Li that combines estimates of the subspace - with estimates of the eigenvalues. - + with estimates of the eigenvalues. + Parameters ---------- e : ndarray m-by-1 vector of eigenvalues li_F : float measures error in the subspace - + Returns ------- n : int dimension of active subspace G : ndarray metrics used to determine active subspace dimension - + """ G = li_F + e.reshape((e.size, 1)) / np.sum(e) n = np.argmin(G) + 1 return n, G - + def _bootstrap_ranges(e, W, X, f, df, weights, ssmethod, nboot=100): """Compute bootstrap ranges for eigenvalues and subspaces. - - An implementation of the nonparametric bootstrap that we use in - conjunction with the subspace estimation methods to estimate the errors in + + An implementation of the nonparametric bootstrap that we use in + conjunction with the subspace estimation methods to estimate the errors in the eigenvalues and subspaces. 
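The subspace error recorded by the bootstrap below is the spectral norm of W1^T W0_2, the sine of the largest principal angle between the estimated active subspace and a bootstrap replicate. As a standalone helper:

    import numpy as np

    def subspace_distance(W, W0, n):
        # sine of the largest principal angle between the two n-dim subspaces
        return np.linalg.norm(np.dot(W[:, :n].T, W0[:, n:]), ord=2)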
- + Parameters ---------- e : ndarray @@ -434,31 +434,31 @@ def _bootstrap_ranges(e, W, X, f, df, weights, ssmethod, nboot=100): samples nboot : int, optional number of bootstrap samples (default 100) - + Returns ------- e_br : ndarray - m-by-2 matrix, first column contains bootstrap lower bound on - eigenvalues, second column contains bootstrap upper bound on + m-by-2 matrix, first column contains bootstrap lower bound on + eigenvalues, second column contains bootstrap upper bound on eigenvalues sub_br : ndarray - (m-1)-by-3 matrix, first column contains bootstrap lower bound on + (m-1)-by-3 matrix, first column contains bootstrap lower bound on estimated subspace error, second column contains estimated mean of subspace error (a reasonable subspace error estimate), third column contains estimated upper bound on subspace error li_F : float Bing Li's metric for order determination based on determinants - + """ if df is not None: df, M, m = process_inputs(df) else: X, M, m = process_inputs(X) - + e_boot = np.zeros((m, nboot)) sub_dist = np.zeros((m-1, nboot)) sub_det = np.zeros((m-1, nboot)) - + # TODO: should be able to parallelize this for i in range(nboot): X0, f0, df0, weights0 = _bootstrap_replicate(X, f, df, weights) @@ -467,42 +467,42 @@ def _bootstrap_ranges(e, W, X, f, df, weights, ssmethod, nboot=100): for j in range(m-1): sub_dist[j,i] = np.linalg.norm(np.dot(W[:,:j+1].T, W0[:,j+1:]), ord=2) sub_det[j,i] = np.linalg.det(np.dot(W[:,:j+1].T, W0[:,:j+1])) - + # bootstrap ranges for the eigenvalues e_br = np.hstack(( np.amin(e_boot, axis=1).reshape((m, 1)), \ np.amax(e_boot, axis=1).reshape((m, 1)) )) - + # bootstrap ranges and mean for subspace distance sub_br = np.hstack(( np.amin(sub_dist, axis=1).reshape((m-1, 1)), \ np.mean(sub_dist, axis=1).reshape((m-1, 1)), \ np.amax(sub_dist, axis=1).reshape((m-1, 1)) )) - + # metric from Li's ladle plot paper li_F = np.vstack(( np.zeros((1,1)), np.sum(1.0 - np.fabs(sub_det), axis=1).reshape((m-1, 1)) / nboot )) li_F = li_F / np.sum(li_F) return e_br, sub_br, li_F - + def sorted_eigh(C): """Compute eigenpairs and sort. - + Parameters ---------- C : ndarray matrix whose eigenpairs you want - + Returns ------- e : ndarray vector of sorted eigenvalues W : ndarray orthogonal matrix of corresponding eigenvectors - + Notes ----- Eigenvectors are unique up to a sign. We make the choice to normalize the eigenvectors so that the first component of each eigenvector is positive. - This normalization is very helpful for the bootstrapping. + This normalization is very helpful for the bootstrapping. """ e, W = np.linalg.eigh(C) e = abs(e) @@ -513,33 +513,32 @@ def sorted_eigh(C): s[s==0] = 1 W = W*s return e.reshape((e.size,1)), W - + def _bootstrap_replicate(X, f, df, weights): """Return a bootstrap replicate. - + A bootstrap replicate is a sampling-with-replacement strategy from a given - data set. - + data set. 
+ """ M = weights.shape[0] ind = np.random.randint(M, size=(M, )) - + if X is not None: X0 = X[ind,:].copy() else: X0 = None - + if f is not None: f0 = f[ind,:].copy() else: f0 = None - + if df is not None: df0 = df[ind,:].copy() else: df0 = None - + weights0 = weights[ind,:].copy() - + return X0, f0, df0, weights0 - diff --git a/active_subspaces/utils/designs.py b/active_subspaces/utils/designs.py index 8430b28..81e409b 100644 --- a/active_subspaces/utils/designs.py +++ b/active_subspaces/utils/designs.py @@ -20,7 +20,7 @@ def interval_design(a, b, N): Returns ------- design, ndarray - N-by-1 matrix that contains the design points in the interval. It does + N-by-1 matrix that contains the design points in the interval. It does not contain the endpoints. """ y = np.linspace(a, b, N+2) @@ -35,15 +35,15 @@ def maximin_design(vert, N): vert : ndarray the vertices that define the m-dimensional polytope. The shape of `vert` is M-by-m, where M is the number of vertices. - N : int + N : int the number of points in the design Returns ------- design : ndarray - N-by-m matrix that contains the design points in the polytope. It does + N-by-m matrix that contains the design points in the polytope. It does not contain the vertices. - + Notes ----- The objective function used to find the design is the negative of the @@ -109,7 +109,7 @@ def _maximin_design_obj(y, vert=None): Parameters ---------- y : ndarray - contains the coordinates of the points in the design. If there are N + contains the coordinates of the points in the design. If there are N points in n dimensions then `y` is shape ((Nn, )). vert : ndarray, optional contains the fixed vertices defining the zonotope @@ -141,7 +141,7 @@ def _maximin_design_grad(y, vert=None): Parameters ---------- y : ndarray - contains the coordinates of the points in the design. If there are N + contains the coordinates of the points in the design. If there are N points in n dimensions then `y` is shape ((Nn, )). vert : ndarray contains the fixed vertices defining the zonotope diff --git a/active_subspaces/utils/misc.py b/active_subspaces/utils/misc.py index 6df69ab..56c37ca 100644 --- a/active_subspaces/utils/misc.py +++ b/active_subspaces/utils/misc.py @@ -3,7 +3,7 @@ class Normalizer(): """An abstract class for normalizing inputs. - + """ def normalize(self, X): """Return corresponding points in normalized domain. @@ -17,7 +17,7 @@ def normalize(self, X): ------- X_norm : ndarray contains the normalized inputs corresponding to `X` - + Notes ----- Points in `X` should be oriented as an m-by-n ndarray, where each row @@ -30,14 +30,14 @@ def unnormalize(self, X): Parameters ---------- - X : ndarray + X : ndarray contains all input points one wishes to unnormalize Returns ------- - X_unnorm : ndarray + X_unnorm : ndarray contains the unnormalized inputs corresponding to `X` - + Notes ----- Points in `X` should be oriented as an m-by-n ndarray, where each row @@ -46,15 +46,15 @@ def unnormalize(self, X): raise NotImplementedError() class BoundedNormalizer(Normalizer): - """A class for normalizing bounded inputs. - + """A class for normalizing bounded inputs. 
+ Attributes ---------- lb : ndarray - a matrix of size m-by-1 that contains lower bounds on the simulation + a matrix of size m-by-1 that contains lower bounds on the simulation inputs ub : ndarray - a matrix of size m-by-1 that contains upper bounds on the simulation + a matrix of size m-by-1 that contains upper bounds on the simulation inputs See Also @@ -85,14 +85,14 @@ def normalize(self, X): Parameters ---------- X : ndarray - contains all input points one wishes to normalize. The shape of `X` - is M-by-m. The components of each row of `X` should be between `lb` + contains all input points one wishes to normalize. The shape of `X` + is M-by-m. The components of each row of `X` should be between `lb` and `ub`. Returns ------- X_norm : ndarray - contains the normalized inputs corresponding to `X`. The components + contains the normalized inputs corresponding to `X`. The components of each row of `X_norm` should be between -1 and 1. """ X, M, m = process_inputs(X) @@ -104,16 +104,16 @@ def unnormalize(self, X): Parameters ---------- - X : ndarray - contains all input points one wishes to unnormalize. The shape of - `X` is M-by-m. The components of each row of `X` should be between + X : ndarray + contains all input points one wishes to unnormalize. The shape of + `X` is M-by-m. The components of each row of `X` should be between -1 and 1. Returns ------- - X_unnorm : ndarray - contains the unnormalized inputs corresponding to `X`. The - components of each row of `X_unnorm` should be between `lb` and + X_unnorm : ndarray + contains the unnormalized inputs corresponding to `X`. The + components of each row of `X_unnorm` should be between `lb` and `ub`. """ X, M, m = process_inputs(X) @@ -122,13 +122,13 @@ def unnormalize(self, X): class UnboundedNormalizer(Normalizer): """A class for normalizing unbounded, Gaussian inputs to standard normals. - + Attributes ---------- - mu : ndarray - a matrix of size m-by-1 that contains the mean of the Gaussian + mu : ndarray + a matrix of size m-by-1 that contains the mean of the Gaussian simulation inputs - L : ndarray + L : ndarray a matrix size m-by-m that contains the Cholesky factor of the covariance matrix of the Gaussian simulation inputs. @@ -149,11 +149,11 @@ def __init__(self, mu, C): Parameters ---------- - mu : ndarray - a matrix of size m-by-1 that contains the mean of the Gaussian + mu : ndarray + a matrix of size m-by-1 that contains the mean of the Gaussian simulation inputs - C : ndarray - a matrix of size m-by-m that contains the covariance matrix of the + C : ndarray + a matrix of size m-by-m that contains the covariance matrix of the Gaussian simulation inputs """ self.mu = mu.reshape((1, mu.size)) @@ -164,15 +164,15 @@ def normalize(self, X): Parameters ---------- - X : ndarray - contains all input points one wishes to normalize. The shape of `X` + X : ndarray + contains all input points one wishes to normalize. The shape of `X` is M-by-m. The components of each row of `X` should be a draw from a Gaussian with mean `mu` and covariance `C`. - + Returns ------- - X_norm : ndarray - contains the normalized inputs corresponding to `X`. The components + X_norm : ndarray + contains the normalized inputs corresponding to `X`. The components of each row of `X_norm` should be draws from a standard multivariate normal distribution. """ @@ -183,22 +183,22 @@ def normalize(self, X): def unnormalize(self, X): """Transform points to original Gaussian. 
- + Return corresponding points transformed to draws from a Gaussian distribution with mean `mu` and covariance `C`. Parameters ---------- - X : ndarray - contains all input points one wishes to unnormalize. The shape of - `X` is M-by-m. The components of each row of `X` should be draws + X : ndarray + contains all input points one wishes to unnormalize. The shape of + `X` is M-by-m. The components of each row of `X` should be draws from a standard multivariate normal. - + Returns ------- X_unnorm : ndarray - contains the unnormalized inputs corresponding to `X`. The - components of each row of `X_unnorm` should represent draws from a + contains the unnormalized inputs corresponding to `X`. The + components of each row of `X_unnorm` should represent draws from a multivariate normal with mean `mu` and covariance `C`. """ X, M, m = process_inputs(X) @@ -211,7 +211,7 @@ def process_inputs(X): Parameters ---------- - X : ndarray + X : ndarray contains input points. The shape of `X` should be M-by-m. Returns @@ -220,7 +220,7 @@ def process_inputs(X): the same as the input M : int number of rows in `X` - m : int + m : int number of columns in `X` """ if len(X.shape) == 2: @@ -236,7 +236,7 @@ def process_inputs_outputs(X, f): Parameters ---------- - X : ndarray + X : ndarray contains input points. The shape of `X` should be M-by-m. f : ndarray M-by-1 matrix @@ -249,7 +249,7 @@ def process_inputs_outputs(X, f): the same as the output M : int number of rows in `X` - m : int + m : int number of columns in `X` """ X, M, m = process_inputs(X) @@ -271,13 +271,13 @@ def process_inputs_outputs(X, f): def conditional_expectations(f, ind): """Compute conditional expectations and variances for given function values. - + Parameters ---------- f : ndarray an ndarry of function evaluations ind : ndarray[int] - index array that tells which values of `f` correspond to the same value + index array that tells which values of `f` correspond to the same value for the active variable. Returns @@ -306,7 +306,7 @@ def conditional_expectations(f, ind): # thanks to Trent for these functions!!! def atleast_2d_col(A): """Wrapper for `atleast_2d(A, 'col')` - + Notes ----- Thanks to Trent Lukaczyk for these functions! @@ -315,7 +315,7 @@ def atleast_2d_col(A): def atleast_2d_row(A): """Wrapper for `atleast_2d(A, 'row')` - + Notes ----- Thanks to Trent Lukaczyk for these functions! @@ -330,7 +330,7 @@ def atleast_2d(A, oned_as='row'): A : ndarray matrix oned_as : str, optional - should be either 'row' or 'col'. It determines whether the array `A` + should be either 'row' or 'col'. It determines whether the array `A` should be expanded as a 2d row or 2d column (default 'row') """ @@ -351,8 +351,3 @@ def atleast_2d(A, oned_as='row'): raise Exception , "oned_as must be 'row' or 'col' " return A - - - - - diff --git a/active_subspaces/utils/plotters.py b/active_subspaces/utils/plotters.py index 4c04d1b..2ffaede 100644 --- a/active_subspaces/utils/plotters.py +++ b/active_subspaces/utils/plotters.py @@ -13,14 +13,14 @@ def plot_opts(savefigs=True, figtype='.eps'): ---------- savefigs : bool save figures into a separate figs director - figtype : str + figtype : str a file extention for the type of image to save - + Returns ------- - opts : dict - the chosen options. The keys in the dictionary are `figtype`, - `savefigs`, and `font`. The `font` is a dictionary that sets the font + opts : dict + the chosen options. The keys in the dictionary are `figtype`, + `savefigs`, and `font`. 
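Reviewer note: a small sketch of how the opts dictionary produced here feeds the plotting routines in the hunks below; the decaying eigenvalues are fabricated for illustration:

    import numpy as np
    import active_subspaces.utils.plotters as plotters

    opts = plotters.plot_opts(savefigs=False, figtype='.png')

    # a made-up k-by-1 vector of decaying eigenvalue estimates
    e = np.exp(-np.arange(1.0, 6.0)).reshape((5, 1))
    plotters.eigenvalues(e, out_label='test function', opts=opts)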
The `font` is a dictionary that sets the font properties of the figures. """ @@ -48,7 +48,7 @@ def eigenvalues(e, e_br=None, out_label=None, opts=None): e : ndarray k-by-1 matrix that contains the estimated eigenvalues e_br : ndarray, optional - lower and upper bounds for the estimated eigenvalues. These are + lower and upper bounds for the estimated eigenvalues. These are typically computed with a bootstrap. (default None) out_label : str, optional a label for the quantity of interest (default None) @@ -96,9 +96,9 @@ def subspace_errors(sub_br ,out_label=None, opts=None): sub_br : ndarray (k-1)-by-3 matix that contains the lower bound, mean, and upper bound of the subspace errors for each dimension of subspace. - out_label : str, optional + out_label : str, optional a label for the quantity of interest (default None) - opts : dict, optional + opts : dict, optional a dictionary with some plot options (default None) See Also @@ -136,12 +136,12 @@ def eigenvectors(W, W_br=None, in_labels=None, out_label=None, opts=None): Parameters ---------- W : ndarray - m-by-k matrix that contains k of the estimated eigenvectors from the + m-by-k matrix that contains k of the estimated eigenvectors from the active subspace analysis. W_br : ndarray, optional - m-by-(2*k) matrix that contains estimated upper and lower bounds on the + m-by-(2*k) matrix that contains estimated upper and lower bounds on the components of the eigenvectors (default None) - in_labels : str[], optional + in_labels : str[], optional list of labels for the simulation's inputs (default None) out_label : str, optional a label for the quantity of interest (default None) @@ -328,13 +328,13 @@ def sufficient_summary(y, f, out_label=None, opts=None): Parameters ---------- y : ndarray - M-by-1 or M-by-2 matrix that contains the values of the predictors for + M-by-1 or M-by-2 matrix that contains the values of the predictors for the summary plot. f : ndarray M-by-1 matrix that contains the corresponding responses - out_label : str, optional + out_label : str, optional a label for the quantity of interest (default None) - opts : dict, optional + opts : dict, optional a dictionary with some plot options (default None) Notes @@ -397,23 +397,23 @@ def zonotope_2d_plot(vertices, design=None, y=None, f=None, out_label=None, opts Parameters ---------- - vertices : ndarray + vertices : ndarray M-by-2 matrix that contains the vertices that define the zonotope design : ndarray, optional N-by-2 matrix that contains a design-of-experiments on the zonotope. The - plot will contain the Delaunay triangulation of the points in `design` + plot will contain the Delaunay triangulation of the points in `design` and `vertices`. (default None) - y : ndarray, optional + y : ndarray, optional K-by-2 matrix that contains points to be plotted inside the zonotope. If `y` is given, then `f` must be given, too. (default None) f: ndarray, optional - K-by-1 matrix that contains a color value for the associated points in - `y`. This is useful for plotting function values or quadrature rules - with the zonotope. If `f` is given, then `y` must be given, too. + K-by-1 matrix that contains a color value for the associated points in + `y`. This is useful for plotting function values or quadrature rules + with the zonotope. If `f` is given, then `y` must be given, too. 
(default None) - out_label : str, optional + out_label : str, optional a label for the quantity of interest (default None) - opts : dict, optional + opts : dict, optional a dictionary with some plot options (default None) Notes diff --git a/active_subspaces/utils/qp_solver.py b/active_subspaces/utils/qp_solver.py index 9ab00ca..37ce56c 100644 --- a/active_subspaces/utils/qp_solver.py +++ b/active_subspaces/utils/qp_solver.py @@ -19,7 +19,7 @@ class QPSolver(): Attributes ---------- - solver : str + solver : str identifies which linear program software to use Notes @@ -35,8 +35,8 @@ def __init__(self, solver='GUROBI'): Parameters ---------- - solver : str, optional - identifies which linear program software to use. Options are + solver : str, optional + identifies which linear program software to use. Options are 'GUROBI' and 'SCIPY'. (default 'GUROBI') """ @@ -59,13 +59,13 @@ def linear_program_eq(self, c, A, b, lb, ub): Parameters ---------- - c : ndarray + c : ndarray m-by-1 matrix for the linear objective function - A : ndarray - M-by-m matrix that contains the coefficients of the linear equality + A : ndarray + M-by-m matrix that contains the coefficients of the linear equality constraints b : ndarray - M-by-1 matrix that is the right hand side of the equality + M-by-1 matrix that is the right hand side of the equality constraints lb : ndarray m-by-1 matrix that contains the lower bounds on the variables @@ -97,17 +97,17 @@ def linear_program_ineq(self, c, A, b): c : ndarray m-by-1 matrix for the linear objective function A : ndarray - M-by-m matrix that contains the coefficients of the linear equality + M-by-m matrix that contains the coefficients of the linear equality constraints - b : ndarray - size M-by-1 matrix that is the right hand side of the equality + b : ndarray + size M-by-1 matrix that is the right hand side of the equality constraints Returns ------- x : ndarray m-by-1 matrix that is the minimizer of the linear program - + """ if self.solver == solver_SCIPY: return _scipy_linear_program_ineq(c, A, b) @@ -127,10 +127,10 @@ def quadratic_program_bnd(self, c, Q, lb, ub): Parameters ---------- c : ndarray - m-by-1 matrix that contains the coefficients of the linear term in + m-by-1 matrix that contains the coefficients of the linear term in the objective function Q : ndarray - m-by-m matrix that contains the coefficients of the quadratic term + m-by-m matrix that contains the coefficients of the quadratic term in the objective function lb : ndarray m-by-1 matrix that contains the lower bounds on the variables @@ -139,9 +139,9 @@ def quadratic_program_bnd(self, c, Q, lb, ub): Returns ------- - x : ndarray + x : ndarray m-by-1 matrix that is the minimizer of the quadratic program - + """ if self.solver == solver_SCIPY: return _scipy_quadratic_program_bnd(c, Q, lb, ub) @@ -153,7 +153,7 @@ def quadratic_program_bnd(self, c, Q, lb, ub): def quadratic_program_ineq(self, c, Q, A, b): """Solves an inequality constrained quadratic program. - + This method returns the minimizer of the following quadratic program. 
minimize c^T x + x^T Q x @@ -162,16 +162,16 @@ def quadratic_program_ineq(self, c, Q, A, b): Parameters ---------- c : ndarray - m-by-1 matrix that contains the coefficients of the linear term in + m-by-1 matrix that contains the coefficients of the linear term in the objective function Q : ndarray - m-by-m matrix that contains the coefficients of the quadratic term + m-by-m matrix that contains the coefficients of the quadratic term in the objective function - A : ndarray - M-by-m matrix that contains the coefficients of the linear equality + A : ndarray + M-by-m matrix that contains the coefficients of the linear equality constraints b : ndarray - M-by-1 matrix that is the right hand side of the equality + M-by-1 matrix that is the right hand side of the equality constraints Returns diff --git a/active_subspaces/utils/quadrature.py b/active_subspaces/utils/quadrature.py index 35a8ca4..8ed6f26 100644 --- a/active_subspaces/utils/quadrature.py +++ b/active_subspaces/utils/quadrature.py @@ -11,12 +11,12 @@ def r_hermite(N): Parameters ---------- - N : int + N : int the number of recurrence coefficients Returns ------- - ab : ndarray + ab : ndarray an `N`-by-2 array of the recurrence coefficients See Also @@ -44,13 +44,13 @@ def r_hermite(N): a = np.zeros(b.shape) ab = np.hstack((a, b)) return ab - + def r_jacobi(N,l,r,a,b): """Recurrence coefficients for the Legendre orthogonal polynomials. Parameters ---------- - N : int + N : int the number of recurrence coefficients l : float the left endpoint of the interval @@ -63,7 +63,7 @@ def r_jacobi(N,l,r,a,b): Returns ------- - ab : ndarray + ab : ndarray an `N`-by-2 array of the recurrence coefficients See Also @@ -82,16 +82,16 @@ def r_jacobi(N,l,r,a,b): if N <= 0: raise ValueError('Parameters out of range.') - + a0 = (b-a)/(a+b+2.0) ab = np.zeros((N+1,2)) b2a2 = b**2 - a**2 s, o = (r-l)/2.0, l + (r-l)/2.0 - + # first row ab[0,0] = s*a0 + o ab[0,1] = 1 - + for k in range(1,N+1): ab[k,0] = s*b2a2/((2*(k)+a+b)*(2*(k+1) + a+b)) + o if k==1: @@ -110,8 +110,8 @@ def jacobi_matrix(ab): Returns ------- - J : ndarray - (N-1)-by-(N-1) symmetric, tridiagonal Jacobi matrix associated with the + J : ndarray + (N-1)-by-(N-1) symmetric, tridiagonal Jacobi matrix associated with the orthogonal polynomials See Also @@ -151,14 +151,14 @@ def gl1d(N): Parameters ---------- - N : int + N : int number of nodes in the quadrature rule Returns ------- - x : ndarray + x : ndarray N-by-1 array of quadrature nodes - w : ndarray + w : ndarray N-by-1 array of quadrature weights See Also @@ -172,7 +172,7 @@ def gl1d(N): """ return g1d(N, 'Legendre') - + def gh1d(N): """One-dimensional Gauss-Hermite quadrature rule. 
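Reviewer note: a sanity-check sketch for these quadrature rules; it assumes the weights come out normalized to the uniform probability density on [-1, 1], which is what the ab[0, 1] = 1 initialization in r_jacobi above suggests:

    import numpy as np
    import active_subspaces.utils.quadrature as gq

    x, w = gq.gauss_legendre(5)    # 5-node rule; x and w are N-by-1

    print(np.sum(w))               # ~1.0 under the normalization assumed above
    print(np.dot(w.T, x**2))       # ~1/3, the mean of x^2 for x uniform on [-1, 1]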
@@ -207,14 +207,14 @@ def g1d(N, quadtype): ---------- N : int number of nodes in the quadrature rule - quadtype : str + quadtype : str type of quadrature rule {'Legendre', 'Hermite'} Returns ------- - x : ndarray + x : ndarray N-by-1 array of quadrature nodes - w : ndarray + w : ndarray N-by-1 array of quadrature weights See Also @@ -234,7 +234,7 @@ def g1d(N, quadtype): ab = r_jacobi(N, -1, 1, 0, 0) else: raise ValueError('quadtype must be Legendre or Hermite') - + J = jacobi_matrix(ab) e, V = np.linalg.eig(J) ind = np.argsort(e) @@ -250,14 +250,14 @@ def gauss_hermite(N): Parameters ---------- - N : int[] + N : int[] number of nodes in each dimension of the quadrature rule Returns ------- - x : ndarray + x : ndarray N-by-1 array of quadrature nodes - w : ndarray + w : ndarray N-by-1 array of quadrature weights Notes @@ -293,14 +293,14 @@ def gauss_legendre(N): Parameters ---------- - N : int[] + N : int[] number of nodes in each dimension of the quadrature rule Returns ------- x : ndarray N-by-1 array of quadrature nodes - w : ndarray + w : ndarray N-by-1 array of quadrature weights Notes diff --git a/active_subspaces/utils/response_surfaces.py b/active_subspaces/utils/response_surfaces.py index b720b5d..211218b 100644 --- a/active_subspaces/utils/response_surfaces.py +++ b/active_subspaces/utils/response_surfaces.py @@ -14,10 +14,10 @@ class ResponseSurface(): Rsqr : float the R-squared coefficient for the response surface X : ndarray - an ndarray of training points for the response surface. The shape is + an ndarray of training points for the response surface. The shape is M-by-m, where m is the number of dimensions. f : ndarray - an ndarray of function values used to train the response surface. The + an ndarray of function values used to train the response surface. The shape of `f` is M-by-1. See Also @@ -50,14 +50,14 @@ class PolynomialApproximation(ResponseSurface): Attributes ---------- poly_weights : ndarray - an ndarray of coefficients for the polynomial approximation in the + an ndarray of coefficients for the polynomial approximation in the monomial basis g : ndarray - contains the m coefficients corresponding to the degree 1 monomials in - the polynomial approximation + contains the m coefficients corresponding to the degree 1 monomials in + the polynomial approximation H : ndarray - an ndarray of shape m-by-m that contains the coefficients of the degree - 2 monomials in the approximation + an ndarray of shape m-by-m that contains the coefficients of the degree + 2 monomials in the approximation See Also -------- @@ -77,18 +77,18 @@ def train(self, X, f, weights=None): Parameters ---------- X : ndarray - an ndarray of training points for the polynomial approximation. The + an ndarray of training points for the polynomial approximation. The shape is M-by-m, where m is the number of dimensions. f : ndarray - an ndarray of function values used to train the polynomial + an ndarray of function values used to train the polynomial approximation. The shape of `f` is M-by-1. - weights : ndarray, optional + weights : ndarray, optional an ndarray of weights for the least-squares. (default is None, which means uniform weights) Notes ----- - This method sets all the attributes of the class for use in the + This method sets all the attributes of the class for use in the `predict` method. """ X, f, M, m = process_inputs_outputs(X, f) @@ -132,19 +132,19 @@ def predict(self, X, compgrad=False): Parameters ---------- X : ndarray - an ndarray of points to evaluate the polynomial approximation. 
The + an ndarray of points to evaluate the polynomial approximation. The shape is M-by-m, where m is the number of dimensions. - compgrad : bool, optional - a flag to decide whether or not to compute the gradient of the + compgrad : bool, optional + a flag to decide whether or not to compute the gradient of the polynomial approximation at the points `X`. (default False) - + Returns ------- - f : ndarray - an ndarray of predictions from the polynomial approximation. The + f : ndarray + an ndarray of predictions from the polynomial approximation. The shape of `f` is M-by-1. - df : ndarray - an ndarray of gradient predictions from the polynomial + df : ndarray + an ndarray of gradient predictions from the polynomial approximation. The shape of `df` is M-by-m. """ X, M, m = process_inputs(X) @@ -165,23 +165,23 @@ def predict(self, X, compgrad=False): class RadialBasisApproximation(ResponseSurface): """Approximate a multivariate function with a radial basis. - + A class for global, multivariate radial basis approximation with anisotropic squared-exponential radial basis and a weighted-least-squares-fit monomial basis. Attributes ---------- - radial_weights : ndarray + radial_weights : ndarray an ndarray of coefficients radial basis functions in the model - poly_weights : poly_weights - an ndarray of coefficients for the polynomial approximation in the + poly_weights : poly_weights + an ndarray of coefficients for the polynomial approximation in the monomial basis K : ndarray - an ndarray of shape M-by-M that contains the matrix of radial basis + an ndarray of shape M-by-M that contains the matrix of radial basis functions evaluated at the training points - ell : ndarray - an ndarray of shape m-by-1 that contains the characteristic length + ell : ndarray + an ndarray of shape m-by-1 that contains the characteristic length scales along each of the inputs See Also @@ -202,17 +202,17 @@ def train(self, X, f, v=None, e=None): Parameters ---------- X : ndarray - an ndarray of training points for the polynomial approximation. The + an ndarray of training points for the polynomial approximation. The shape is M-by-m, where m is the number of dimensions. f : ndarray - an ndarray of function values used to train the polynomial + an ndarray of function values used to train the polynomial approximation. The shape of `f` is M-by-1. v : ndarray, optional - contains the regularization parameters that model error in the + contains the regularization parameters that model error in the function values (default None) e : ndarray, optional - an ndarray containing the eigenvalues from the active subspace - analysis. If present, the radial basis uses it to determine the + an ndarray containing the eigenvalues from the active subspace + analysis. If present, the radial basis uses it to determine the appropriate anisotropy in the length scales. (default None) Notes @@ -250,10 +250,10 @@ def train(self, X, f, v=None, e=None): ell = g*np.sum(e)/e[:m] if v is None: v = g*np.sum(e[m:])*np.ones(f.shape) - + # ensure conditioning v = np.amax([v.reshape(f.shape), 1e-6*np.ones(f.shape)], axis=0) - + # covariance matrix of observations K = exponential_squared(X, X, 1.0, ell) K += np.diag(v.reshape((M,))) @@ -280,19 +280,19 @@ def predict(self, X, compgrad=False): Parameters ---------- X : ndarray - an ndarray of points to evaluate the polynomial approximation. The + an ndarray of points to evaluate the polynomial approximation. The shape is M-by-m, where m is the number of dimensions. 
- compgrad : bool, optional - a flag to decide whether or not to compute the gradient of the + compgrad : bool, optional + a flag to decide whether or not to compute the gradient of the polynomial approximation at the points `X`. (default False) - + Returns ------- - f : ndarray - an ndarray of predictions from the polynomial approximation. The + f : ndarray + an ndarray of predictions from the polynomial approximation. The shape of `f` is M-by-1. - df : ndarray - an ndarray of gradient predictions from the polynomial + df : ndarray + an ndarray of gradient predictions from the polynomial approximation. The shape of `df` is M-by-m. Notes @@ -332,13 +332,13 @@ def _rbf_objective(log10g, X, f, v, N, e): Parameters ---------- - log10g : float + log10g : float the log of the scaling factor for the rbf shape parameters X : ndarray the ndarray of training points f : ndarray the ndarray of training data - v : ndarray + v : ndarray contains the regularization parameters for the training data N : int the order of polynomial approximation @@ -347,9 +347,9 @@ def _rbf_objective(log10g, X, f, v, N, e): Returns ------- - r : float - objective function value. If you were training a Gaussian process, it - would be the negative log likelihood. In this context, it's just a + r : float + objective function value. If you were training a Gaussian process, it + would be the negative log likelihood. In this context, it's just a heuristic. """ # TODO: I can probably make this implementation more efficient, but as of @@ -382,7 +382,7 @@ def _rbf_objective(log10g, X, f, v, N, e): # variance sig2 = np.max([np.dot(res.T, np.linalg.solve(K, res))/M, 5*np.finfo(float).eps]) - + r = np.sum(np.log(np.diag(L))) + M*np.log(sig2) return r @@ -403,8 +403,8 @@ def exponential_squared(X1, X2, sigma, ell): Returns ------- - C : ndarray - the matrix of radial functions centered at `X1` and evaluated at `X2`. + C : ndarray + the matrix of radial functions centered at `X1` and evaluated at `X2`. The shape of `C` is `X1.shape[0]`-by-`X2.shape[0]`. """ m = X1.shape[0] @@ -433,10 +433,10 @@ def grad_exponential_squared(X1, X2, sigma, ell): Returns ------- - dC : ndarray - the matrix of radial function gradients centered at `X1` and evaluated - at `X2`. The shape of `dC` is `X1.shape[0]`-by-`X2.shape[0]`-by-m. `dC` - is a three-dimensional ndarray. The third dimension indexes the partial + dC : ndarray + the matrix of radial function gradients centered at `X1` and evaluated + at `X2`. The shape of `dC` is `X1.shape[0]`-by-`X2.shape[0]`-by-m. `dC` + is a three-dimensional ndarray. The third dimension indexes the partial derivatives in each gradient. """ m, d = X1.shape @@ -455,17 +455,17 @@ def polynomial_bases(X, N): Parameters ---------- - X : ndarray + X : ndarray contains the points to evaluate the monomials N : int the maximum degree of the monomial basis Returns ------- - B : ndarray + B : ndarray contains the monomial evaluations - I : ndarray - contains the multi-indices that tell the degree of each univariate + I : ndarray + contains the multi-indices that tell the degree of each univariate monomial term in the multivariate monomial """ M, m = X.shape @@ -483,7 +483,7 @@ def grad_polynomial_bases(X, N): Parameters ---------- - X : ndarray + X : ndarray contains the points to evaluate the monomials N : int the maximum degree of the monomial basis @@ -491,7 +491,7 @@ def grad_polynomial_bases(X, N): Returns ------- dB : ndarray - contains the gradients of the monomials evaluate at `X`. 
`dB` is a + contains the gradients of the monomials evaluate at `X`. `dB` is a three-dimensional ndarray. The third dimension indexes the partial derivatives in each gradient. diff --git a/active_subspaces/utils/simrunners.py b/active_subspaces/utils/simrunners.py index 737b9c8..4c87120 100644 --- a/active_subspaces/utils/simrunners.py +++ b/active_subspaces/utils/simrunners.py @@ -9,7 +9,7 @@ class SimulationRunner(): Attributes ---------- - fun : function + fun : function runs the simulation for a fixed value of the input parameters, given as an ndarray @@ -30,10 +30,10 @@ def __init__(self, fun): Parameters ---------- - fun : function - a function that runs the simulation for a fixed value of the input - parameters, given as an ndarray. This function returns the quantity - of interest from the model. Often, this function is a wrapper to a + fun : function + a function that runs the simulation for a fixed value of the input + parameters, given as an ndarray. This function returns the quantity + of interest from the model. Often, this function is a wrapper to a larger simulation code. """ if not hasattr(fun, '__call__'): @@ -43,18 +43,18 @@ def __init__(self, fun): def run(self, X): """Run the simulation at several input values. - + Parameters ---------- X : ndarray contains all input points where one wishes to run the simulation. If - the simulation takes m inputs, then `X` must have shape M-by-m, + the simulation takes m inputs, then `X` must have shape M-by-m, where M is the number of simulations to run. Returns ------- - F : ndarray - contains the simulation output at each given input point. The shape + F : ndarray + contains the simulation output at each given input point. The shape of `F` is M-by-1. Notes @@ -73,10 +73,10 @@ def run(self, X): # TODO: provide some timing information # start = time.time() - + for i in range(M): F[i] = self.fun(X[i,:].reshape((1,m))) - + # TODO: provide some timing information # end = time.time() - start @@ -84,15 +84,15 @@ def run(self, X): class SimulationGradientRunner(): """Evaluates gradients at several input values. - - + + A class for running several simulations at different input values that return the gradients of the quantity of interest. Attributes ---------- - dfun : function - a function that runs the simulation for a fixed value of the input + dfun : function + a function that runs the simulation for a fixed value of the input parameters, given as an ndarray. It returns the gradient of the quantity of interest at the given input. @@ -114,9 +114,9 @@ def __init__(self, dfun): Parameters ---------- - dfun : function - a function that runs the simulation for a fixed value of the input - parameters, given as an ndarray. It returns the gradient of the + dfun : function + a function that runs the simulation for a fixed value of the input + parameters, given as an ndarray. It returns the gradient of the quantity of interest at the given input. """ if not hasattr(dfun, '__call__'): @@ -126,21 +126,21 @@ def __init__(self, dfun): def run(self, X): """Run at several input values. - + Run the simulation at several input values and return the gradients of the quantity of interest. Parameters ---------- - X : ndarray - contains all input points where one wishes to run the simulation. - If the simulation takes m inputs, then `X` must have shape M-by-m, + X : ndarray + contains all input points where one wishes to run the simulation. + If the simulation takes m inputs, then `X` must have shape M-by-m, where M is the number of simulations to run. 
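Reviewer note: a minimal sketch of the SimulationRunner loop shown above; the quantity of interest is a stand-in:

    import numpy as np
    from active_subspaces.utils.simrunners import SimulationRunner

    def fun(x):
        # x arrives as a 1-by-m ndarray, per the run() loop above
        return np.sum(x**2)

    sr = SimulationRunner(fun)
    X = np.random.uniform(-1.0, 1.0, size=(20, 3))
    F = sr.run(X)   # F is 20-by-1; row i is fun evaluated at X[i, :]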
Returns ------- - dF : ndarray - contains the gradient of the quantity of interest at each given + dF : ndarray + contains the gradient of the quantity of interest at each given input point. The shape of `dF` is M-by-m. Notes @@ -159,11 +159,11 @@ def run(self, X): # TODO: provide some timing information # start = time.time() - + for i in range(M): df = self.dfun(X[i,:].reshape((1,m))) dF[i,:] = df.reshape((1,m)) - + # TODO: provide some timing information # end = time.time() - start diff --git a/tests/test_as_optimizers.py b/tests/test_as_optimizers.py index 341a49b..ca6ef70 100644 --- a/tests/test_as_optimizers.py +++ b/tests/test_as_optimizers.py @@ -30,7 +30,7 @@ def test_rs_ubnd_int(self): x = X0[i,:] f0[i,0] = self.quad_fun(x) df0[i,:] = self.quad_dfun(x).reshape((3, )) - + sub = ss.Subspaces() sub.compute(df=df0) sub.partition(1) diff --git a/tests/test_domains.py b/tests/test_domains.py index ec572be..4615563 100644 --- a/tests/test_domains.py +++ b/tests/test_domains.py @@ -6,11 +6,11 @@ import pdb class TestDomains(TestCase): - + def test_unbounded_active_variable_domain(self): np.random.seed(42) df = np.random.normal(size=(10,3)) - + sub = ss.Subspaces() sub.compute(df=df) uavd = dom.UnboundedActiveVariableDomain(sub) @@ -128,7 +128,7 @@ def test_bounded_active_variable_map_1(self): Y,Z = bavm.forward(X) X0 = np.dot(Y, sub.W1.T) + np.dot(Z, sub.W2.T) np.testing.assert_almost_equal(X0, X) - + def test_bounded_active_variable_map_2(self): np.random.seed(42) df0 = np.random.normal(size=(10,3)) @@ -145,7 +145,7 @@ def test_bounded_active_variable_map_2(self): X0 = bavm.inverse(Y, N=10)[0] np.testing.assert_almost_equal(np.dot(X0, sub.W1), np.kron(Y, np.ones((10,1))) ) np.testing.assert_equal(np.floor(np.abs(X0)), np.zeros(X0.shape)) - + def test_bounded_active_variable_map_3(self): np.random.seed(42) df0 = np.random.normal(size=(10,3)) @@ -153,7 +153,7 @@ def test_bounded_active_variable_map_3(self): sub = ss.Subspaces() sub.compute(df=df0) m, n = sub.W1.shape - + bavd = dom.BoundedActiveVariableDomain(sub) bavm = dom.BoundedActiveVariableMap(bavd) @@ -162,7 +162,7 @@ def test_bounded_active_variable_map_3(self): X0 = bavm.inverse(Y, N=10)[0] np.testing.assert_almost_equal(np.dot(X0, sub.W1), np.kron(Y, np.ones((10,1))) ) np.testing.assert_equal(np.floor(np.abs(X0)), np.zeros(X0.shape)) - + def test_rejection_sample_z(self): np.random.seed(42) df0 = np.random.normal(size=(10,3)) @@ -179,7 +179,7 @@ def test_rejection_sample_z(self): np.random.seed(42) Z = dom.rejection_sampling_z(N, y, W1, W2) - + def test_hit_and_run_z(self): np.random.seed(42) @@ -230,7 +230,7 @@ def test_sample_z(self): np.random.seed(42) Z = dom.sample_z(N, y, W1, W2) - + if __name__ == '__main__': unittest.main() diff --git a/tests/test_gradients.py b/tests/test_gradients.py index e883e8d..ecdba58 100644 --- a/tests/test_gradients.py +++ b/tests/test_gradients.py @@ -4,14 +4,14 @@ import active_subspaces.gradients as gr class TestGradients(TestCase): - writeData = False - + writeData = False + def test_local_linear_gradients(self): np.random.seed(42) X = np.random.uniform(-1.0,1.0,size=(200,2)) f = 2 - 5*X[:,0] + 4*X[:,1] - + df = gr.local_linear_gradients(X, f) M = df.shape[0] np.testing.assert_array_almost_equal(df, np.tile(np.array([-5.0, 4.0]), (M,1)), decimal=9) @@ -19,15 +19,15 @@ def test_local_linear_gradients(self): df = gr.local_linear_gradients(X, f, p=8) M = df.shape[0] np.testing.assert_array_almost_equal(df, np.tile(np.array([-5.0, 4.0]), (M,1)), decimal=9) - - f = 2 - np.sin(X[:,0]) + 
np.cos(X[:,1]) + + f = 2 - np.sin(X[:,0]) + np.cos(X[:,1]) np.random.seed(1234) df = gr.local_linear_gradients(X, f) - + def test_finite_difference_gradients(self): def myfun(x): return 2 - 5*x[0,0] + 4*x[0,1] - + np.random.seed(42) X = np.random.uniform(-1.0,1.0,size=(10,2)) @@ -37,4 +37,4 @@ def myfun(x): np.testing.assert_array_almost_equal(df, df_test, decimal=6) if __name__ == '__main__': - unittest.main() \ No newline at end of file + unittest.main() diff --git a/tests/test_qp_solver.py b/tests/test_qp_solver.py index 1d0243e..c092245 100644 --- a/tests/test_qp_solver.py +++ b/tests/test_qp_solver.py @@ -93,4 +93,3 @@ def test_bad_solver(self): if __name__ == '__main__': unittest.main() - diff --git a/tests/test_quadrature.py b/tests/test_quadrature.py index 7d62fb7..2f5711c 100644 --- a/tests/test_quadrature.py +++ b/tests/test_quadrature.py @@ -4,7 +4,7 @@ import numpy as np class TestQuadrature(TestCase): - + def test_r_hermite_type_error(self): self.assertRaises(TypeError, gq.r_hermite, 'string') diff --git a/tests/test_response_surfaces.py b/tests/test_response_surfaces.py index 78eda2e..4d24073 100644 --- a/tests/test_response_surfaces.py +++ b/tests/test_response_surfaces.py @@ -130,7 +130,7 @@ def test_exact_polynomial_approximation_2d(self): pr = rs.PolynomialApproximation(N=2) pr.train(X_train, f_2d.reshape((f_2d.size,1))) print 'Rsqr: {:6.4f}'.format(pr.Rsqr) - + X = np.random.normal(size=(10,2)) X_test = X.copy() f, df = pr.predict(X_test, compgrad=True) @@ -277,7 +277,7 @@ def test_rbf_grad_1d(self): gp = rs.RadialBasisApproximation(N=0) gp.train(X_1d, f_1d) - + X = np.random.normal(size=(10,2)) M = X.shape[0] X_1d_test = X[:,0].copy().reshape((M,1)) diff --git a/tests/test_subspaces.py b/tests/test_subspaces.py index a454d9b..c3400a0 100644 --- a/tests/test_subspaces.py +++ b/tests/test_subspaces.py @@ -11,14 +11,14 @@ def test_sorted_eigh(self): C = np.dot(X.transpose(),X) e, W = ss.sorted_eigh(C) np.testing.assert_array_less(e[1], e[0]) - + def test_active_subspace(self): np.random.seed(42) df = np.random.normal(size=(10,3)) weights = np.ones((10,1)) / 10 e, W = ss.active_subspace(df, weights) np.testing.assert_array_less(e[1], e[0]) - + def test_ols_subspace(self): np.random.seed(42) X = np.random.normal(size=(20,3)) @@ -26,7 +26,7 @@ def test_ols_subspace(self): weights = np.ones((20,1)) / 20 e, W = ss.ols_subspace(X, f, weights) np.testing.assert_array_less(e[1], e[0]) - + def test_qphd_subspace(self): np.random.seed(42) X = np.random.normal(size=(50,3)) @@ -34,7 +34,7 @@ def test_qphd_subspace(self): weights = np.ones((50,1)) / 50 e, W = ss.qphd_subspace(X, f, weights) np.testing.assert_array_less(e[1], e[0]) - + def test_opg_subspace(self): np.random.seed(42) X = np.random.normal(size=(50,3)) @@ -42,7 +42,7 @@ def test_opg_subspace(self): weights = np.ones((50,1)) / 50 e, W = ss.opg_subspace(X, f, weights) np.testing.assert_array_less(e[1], e[0]) - + def test_bootstrap_replicate(self): np.random.seed(42) X = np.random.normal(size=(10,3)) @@ -52,26 +52,26 @@ def test_bootstrap_replicate(self): X0, f0, df0, w0 = ss._bootstrap_replicate(X, f, df, weights) assert(np.any(weights==w0[0])) assert(np.any(f==f0[1])) - + def test_bootstrap_ranges(self): np.random.seed(42) X = np.random.normal(size=(50,3)) f = np.random.normal(size=(50,1)) df = np.random.normal(size=(50,3)) weights = np.ones((50,1)) / 50 - + e, W = ss.active_subspace(df, weights) ssmethod = lambda X, f, df, weights: ss.active_subspace(df, weights) d = ss._bootstrap_ranges(e, W, None, None, df, weights, 
ssmethod, nboot=10) - + e, W = ss.ols_subspace(X, f, weights) ssmethod = lambda X, f, df, weights: ss.ols_subspace(X, f, weights) d = ss._bootstrap_ranges(e, W, X, f, None, weights, ssmethod, nboot=10) - + e, W = ss.qphd_subspace(X, f, weights) ssmethod = lambda X, f, df, weights: ss.qphd_subspace(X, f, weights) d = ss._bootstrap_ranges(e, W, X, f, None, weights, ssmethod, nboot=10) - + e, W = ss.opg_subspace(X, f, weights) ssmethod = lambda X, f, df, weights: ss.opg_subspace(X, f, weights) d = ss._bootstrap_ranges(e, W, X, f, None, weights, ssmethod, nboot=10) @@ -101,30 +101,30 @@ def test_ladle_partition(self): ssmethod = lambda X, f, df, weights: ss.active_subspace(df, weights) e_br, sub_br, li_F = ss._bootstrap_ranges(e, W, None, None, df, weights, ssmethod, nboot=10) d = ss.ladle_partition(e, li_F) - + def test_subspace_class(self): np.random.seed(42) X = np.random.normal(size=(50,3)) f = np.random.normal(size=(50,1)) df = np.random.normal(size=(50,3)) weights = np.ones((50,1)) / 50 - + sub = ss.Subspaces() sub.compute(X, f, df, weights) sub.compute(X, f, df, weights, sstype='AS') sub.compute(X, f, df, weights, sstype='OLS') sub.compute(X, f, df, weights, sstype='QPHD') sub.compute(X, f, df, weights, sstype='OPG') - + sub.compute(X, f, df, weights, sstype='AS', nboot=10) sub.compute(X, f, df, weights, sstype='OLS', nboot=10) sub.compute(X, f, df, weights, sstype='QPHD', nboot=10) sub.compute(X, f, df, weights, sstype='OPG', nboot=10) - + sub.compute(X, f, df, weights, sstype='AS', ptype='EVG', nboot=100) sub.compute(X, f, df, weights, sstype='AS', ptype='RS', nboot=100) sub.compute(X, f, df, weights, sstype='AS', ptype='LI', nboot=100) - + if __name__ == '__main__': unittest.main() From 592136ec7eb34032493da7712edf24f643033c17 Mon Sep 17 00:00:00 2001 From: Takuya Iwanaga Date: Wed, 24 Oct 2018 00:12:38 +1100 Subject: [PATCH 2/3] Use numbers package for instance check --- active_subspaces/domains.py | 9 +++++---- active_subspaces/gradients.py | 3 ++- active_subspaces/integrals.py | 9 +++++---- active_subspaces/response_surfaces.py | 5 +++-- active_subspaces/subspaces.py | 3 ++- active_subspaces/utils/quadrature.py | 9 +++++---- 6 files changed, 22 insertions(+), 16 deletions(-) diff --git a/active_subspaces/domains.py b/active_subspaces/domains.py index 1458229..a18c49f 100644 --- a/active_subspaces/domains.py +++ b/active_subspaces/domains.py @@ -1,5 +1,6 @@ """Utilities for building the domains and maps for active variables.""" import numpy as np +from numbers import Integral from utils.misc import process_inputs, BoundedNormalizer from scipy.spatial import ConvexHull from scipy.misc import comb @@ -210,7 +211,7 @@ def inverse(self, Y, N=1): # check inputs Y, NY, n = process_inputs(Y) - if not isinstance(N, int): + if not isinstance(N, Integral): raise TypeError('N must be an int') Z = self.regularize_z(Y, N) @@ -356,10 +357,10 @@ def nzv(m, n): N : int the number of vertices defining the zonotope """ - if not isinstance(m, int): + if not isinstance(m, Integral): raise TypeError('m should be an integer.') - if not isinstance(n, int): + if not isinstance(n, Integral): raise TypeError('n should be an integer.') # number of zonotope vertices @@ -521,7 +522,7 @@ def sample_z(N, y, W1, W2): Gleich for showing me Chebyshev centers. 
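Reviewer note: the isinstance(N, int) to isinstance(N, Integral) change in this patch matters because NumPy integer scalars register as numbers.Integral but are not reliably subclasses of the builtin int; a sketch:

    import numpy as np
    from numbers import Integral

    n = np.int32(3)
    print(isinstance(n, int))       # False on most platforms
    print(isinstance(n, Integral))  # True; NumPy registers its integer types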
""" - if not isinstance(N, int): + if not isinstance(N, Integral): raise TypeError('N should be an integer.') Z = rejection_sampling_z(N, y, W1, W2) diff --git a/active_subspaces/gradients.py b/active_subspaces/gradients.py index 5e692d2..126bb0a 100644 --- a/active_subspaces/gradients.py +++ b/active_subspaces/gradients.py @@ -1,5 +1,6 @@ """Utilities for approximating gradients.""" import numpy as np +from numbers import Integral from utils.misc import process_inputs from utils.simrunners import SimulationRunner @@ -39,7 +40,7 @@ def local_linear_gradients(X, f, p=None, weights=None): if p is None: p = int(np.minimum(np.floor(1.7*m), M)) - elif not isinstance(p, int): + elif not isinstance(p, Integral): raise TypeError('p must be an integer.') if p < m+1 or p > M: diff --git a/active_subspaces/integrals.py b/active_subspaces/integrals.py index c2ce2fa..283c217 100644 --- a/active_subspaces/integrals.py +++ b/active_subspaces/integrals.py @@ -2,6 +2,7 @@ import numpy as np import utils.quadrature as gq +from numbers import Integral from utils.misc import conditional_expectations from utils.designs import maximin_design from utils.simrunners import SimulationRunner @@ -51,7 +52,7 @@ def integrate(fun, avmap, N, NMC=10): if not isinstance(avmap, ActiveVariableMap): raise TypeError('avmap should be an ActiveVariableMap.') - if not isinstance(N, int): + if not isinstance(N, Integral): raise TypeError('N should be an integer') # get the quadrature rule @@ -104,7 +105,7 @@ def av_integrate(avfun, avmap, N): if not isinstance(avmap, ActiveVariableMap): raise TypeError('avmap should be an ActiveVariableMap.') - if not isinstance(N, int): + if not isinstance(N, Integral): raise TypeError('N should be an integer.') Yp, Yw = av_quadrature_rule(avmap, N) @@ -163,10 +164,10 @@ def quadrature_rule(avmap, N, NMC=10): if not isinstance(avmap, ActiveVariableMap): raise TypeError('avmap should be an ActiveVariableMap.') - if not isinstance(N, int): + if not isinstance(N, Integral): raise TypeError('N should be an integer.') - if not isinstance(NMC, int): + if not isinstance(NMC, Integral): raise TypeError('NMC should be an integer.') # get quadrature rule on active variables diff --git a/active_subspaces/response_surfaces.py b/active_subspaces/response_surfaces.py index 535606f..ad5be3b 100644 --- a/active_subspaces/response_surfaces.py +++ b/active_subspaces/response_surfaces.py @@ -1,6 +1,7 @@ """Utilities for exploiting active subspaces in response surfaces.""" import numpy as np import utils.designs as dn +from numbers import Integral from utils.simrunners import SimulationRunner from utils.misc import conditional_expectations from utils.response_surfaces import RadialBasisApproximation @@ -270,10 +271,10 @@ def av_design(avmap, N, NMC=10): raise TypeError('avmap should be an ActiveVariableMap.') # interpret N as total number of points in the design - if not isinstance(N, int): + if not isinstance(N, Integral): raise Exception('N should be an integer.') - if not isinstance(NMC, int): + if not isinstance(NMC, Integral): raise Exception('NMC should be an integer.') m, n = avmap.domain.subspaces.W1.shape diff --git a/active_subspaces/subspaces.py b/active_subspaces/subspaces.py index 6e47908..3aeec91 100644 --- a/active_subspaces/subspaces.py +++ b/active_subspaces/subspaces.py @@ -1,6 +1,7 @@ """Utilities for computing active and inactive subspaces.""" from __future__ import division import numpy as np +from numbers import Integral from utils.misc import process_inputs, process_inputs_outputs from 
utils.response_surfaces import PolynomialApproximation from gradients import local_linear_gradients @@ -170,7 +171,7 @@ def partition(self, n): the dimension of the active subspace """ - if not isinstance(n, int): + if not isinstance(n, Integral): raise TypeError('n should be an integer') m = self.eigenvecs.shape[0] diff --git a/active_subspaces/utils/quadrature.py b/active_subspaces/utils/quadrature.py index 8ed6f26..b01fdb8 100644 --- a/active_subspaces/utils/quadrature.py +++ b/active_subspaces/utils/quadrature.py @@ -5,6 +5,7 @@ import numpy as np import misc as mi +from numbers import Integral def r_hermite(N): """Recurrence coefficients for the Hermite orthogonal polynomials. @@ -30,7 +31,7 @@ def r_hermite(N): https://www.cs.purdue.edu/archives/2002/wxg/codes/OPQ.html. """ - if not isinstance(N, int): + if not isinstance(N, Integral): raise TypeError('N must be an int') if N <= 0: @@ -77,7 +78,7 @@ def r_jacobi(N,l,r,a,b): https://www.cs.purdue.edu/archives/2002/wxg/codes/OPQ.html. """ - if not isinstance(N, int): + if not isinstance(N, Integral): raise TypeError('N must be an int') if N <= 0: @@ -266,7 +267,7 @@ def gauss_hermite(N): https://www.cs.purdue.edu/archives/2002/wxg/codes/OPQ.html. """ - if isinstance(N, int): + if isinstance(N, Integral): N = [N] if type(N) is not list: @@ -309,7 +310,7 @@ def gauss_legendre(N): https://www.cs.purdue.edu/archives/2002/wxg/codes/OPQ.html. """ - if isinstance(N, int): + if isinstance(N, Integral): N = [N] if type(N) is not list: From 45d118852fc5ca6ce861c740bb7bc3d845ae8dc6 Mon Sep 17 00:00:00 2001 From: Takuya Iwanaga Date: Wed, 24 Oct 2018 00:14:00 +1100 Subject: [PATCH 3/3] Flatten array for future changes --- active_subspaces/utils/plotters.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/active_subspaces/utils/plotters.py b/active_subspaces/utils/plotters.py index 2ffaede..abb0ec1 100644 --- a/active_subspaces/utils/plotters.py +++ b/active_subspaces/utils/plotters.py @@ -376,6 +376,8 @@ def sufficient_summary(y, f, out_label=None, opts=None): plt.figure(figsize=(7,7)) plt.rc('font', **opts['myfont']) + if isinstance(f, np.ndarray): + f = f.flatten() plt.scatter(y1, y2, c=f, s=150.0, vmin=np.min(f), vmax=np.max(f)) plt.xlabel('Active variable 1') plt.ylabel('Active variable 2')
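Reviewer note: flattening f before the scatter call is presumably defensive; an M-by-1 array passed as the c= color argument is ambiguous to newer matplotlib releases, while a 1-D array of length M is not. A sketch of the resulting pattern, under that assumption:

    import numpy as np
    import matplotlib.pyplot as plt

    y1 = np.random.normal(size=50)
    y2 = np.random.normal(size=50)
    f = np.random.normal(size=(50, 1))   # column vector, as the library stores outputs

    # pass a 1-D color array so scatter cannot misread the M-by-1 shape
    plt.scatter(y1, y2, c=f.flatten(), s=150.0,
                vmin=np.min(f), vmax=np.max(f))
    plt.colorbar()
    plt.show()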